From e9d4952a4cbd6f832cb2a992420c6f6c4198b55d Mon Sep 17 00:00:00 2001 From: jpechane Date: Thu, 19 Dec 2024 14:48:57 +0000 Subject: [PATCH] deploy: d20690dcaa3e1a1ba89ea7710ec281cd119bcec3 --- 404.html | 2 +- archives/index.html | 2 +- archives/page/10/index.html | 2 +- archives/page/11/index.html | 2 +- archives/page/12/index.html | 2 +- archives/page/13/index.html | 2 +- archives/page/14/index.html | 2 +- archives/page/15/index.html | 2 +- archives/page/16/index.html | 2 +- archives/page/17/index.html | 2 +- archives/page/18/index.html | 2 +- archives/page/19/index.html | 2 +- archives/page/2/index.html | 2 +- archives/page/20/index.html | 2 +- archives/page/21/index.html | 2 +- archives/page/22/index.html | 2 +- archives/page/23/index.html | 2 +- archives/page/24/index.html | 2 +- archives/page/25/index.html | 2 +- archives/page/26/index.html | 2 +- archives/page/27/index.html | 2 +- archives/page/28/index.html | 2 +- archives/page/29/index.html | 2 +- archives/page/3/index.html | 2 +- archives/page/30/index.html | 2 +- archives/page/31/index.html | 2 +- archives/page/32/index.html | 2 +- archives/page/33/index.html | 2 +- archives/page/34/index.html | 2 +- archives/page/35/index.html | 2 +- archives/page/36/index.html | 2 +- archives/page/37/index.html | 2 +- archives/page/38/index.html | 2 +- archives/page/39/index.html | 2 +- archives/page/4/index.html | 2 +- archives/page/40/index.html | 2 +- archives/page/41/index.html | 2 +- archives/page/42/index.html | 2 +- archives/page/43/index.html | 2 +- archives/page/44/index.html | 2 +- archives/page/45/index.html | 2 +- archives/page/46/index.html | 2 +- archives/page/47/index.html | 2 +- archives/page/48/index.html | 2 +- archives/page/49/index.html | 2 +- archives/page/5/index.html | 2 +- archives/page/50/index.html | 2 +- archives/page/51/index.html | 2 +- archives/page/52/index.html | 2 +- archives/page/53/index.html | 2 +- archives/page/54/index.html | 2 +- archives/page/6/index.html | 2 +- archives/page/7/index.html | 2 +- archives/page/8/index.html | 2 +- archives/page/9/index.html | 2 +- .../03/18/Debezium-0-1-Released/index.html | 2 +- blog/2016/04/14/Debezium-website/index.html | 2 +- blog/2016/04/15/parsing-ddl/index.html | 2 +- .../05/31/Debezium-on-Kubernetes/index.html | 2 +- .../06/10/Debezium-0.2.1-Released/index.html | 2 +- .../06/22/Debezium-0-2-2-Released/index.html | 2 +- .../07/26/Debezium-0-2-3-Released/index.html | 2 +- .../capturing-changes-from-mysql/index.html | 2 +- .../08/16/Debezium-0-2-4-Released/index.html | 2 +- .../08/16/Debezium-0-3-0-Released/index.html | 2 +- .../08/30/Debezium-0-3-1-Released/index.html | 2 +- .../index.html | 2 +- .../09/26/Debezium-0-3-2-Released/index.html | 2 +- .../10/18/Debezium-0-3-3-Released/index.html | 2 +- .../index.html | 2 +- .../10/25/Debezium-0-3-4-Released/index.html | 2 +- .../11/14/Debezium-0-3-5-Released/index.html | 2 +- .../12/21/Debezium-0-3-6-Released/index.html | 2 +- .../02/07/Debezium-0-4-0-Released/index.html | 2 +- .../02/08/Support-for-Postgresql/index.html | 2 +- blog/2017/02/22/Debezium-at-WePay/index.html | 2 +- .../03/17/Debezium-0-4-1-Released/index.html | 2 +- .../03/27/Debezium-0-5-0-Released/index.html | 2 +- blog/2017/04/26/Debezium-evolving/index.html | 2 +- blog/2017/04/27/hello-debezium/index.html | 2 +- .../06/12/debezium-0-5-1-released/index.html | 2 +- .../08/17/debezium-0-5-2-is-out/index.html | 2 +- .../09/21/debezium-0-6-0-released/index.html | 2 +- .../streaming-to-another-database/index.html | 2 +- 
.../10/26/debezium-0-6-1-released/index.html | 2 +- .../11/debezium-at-devoxx-belgium/index.html | 2 +- .../11/15/debezium-0-6-2-released/index.html | 2 +- .../12/15/debezium-0-7-0-released/index.html | 2 +- .../12/20/debezium-0-7-1-released/index.html | 2 +- .../17/streaming-to-elasticsearch/index.html | 2 +- .../01/25/debezium-0-7-2-released/index.html | 2 +- .../02/15/debezium-0-7-3-released/index.html | 2 +- .../03/07/debezium-0-7-4-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../03/20/debezium-0-7-5-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-8-0-beta1-released/index.html | 2 +- .../04/debezium-0-8-0-cr1-released/index.html | 2 +- .../debezium-0-8-0-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-9-0-alpha1-released/index.html | 2 +- .../08/30/debezium-0-8-2-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-8-3-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-9-0-alpha2-released/index.html | 2 +- .../debezium-0-9-0-beta1-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-9-0-beta2-released/index.html | 2 +- .../28/debezium-0-9-0-cr1-released/index.html | 2 +- .../debezium-0-9-0-final-released/index.html | 2 +- .../debezium-0-9-1-final-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../debezium-0-9-2-final-released/index.html | 2 +- .../03/14/debezium-meets-quarkus/index.html | 2 +- .../debezium-0-9-3-final-released/index.html | 2 +- .../debezium-0-9-4-final-released/index.html | 2 +- blog/2019/04/18/hello-debezium/index.html | 2 +- .../debezium-0-9-5-final-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../05/debezium-newsletter-01-2019/index.html | 2 +- .../debezium-0-10-0-beta1-released/index.html | 2 +- .../06/19/debezium-wears-fedora/index.html | 2 +- .../debezium-0-10-0-beta2-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../debezium-0-10-0-beta3-released/index.html | 2 +- .../debezium-0-10-0-beta4-released/index.html | 2 +- .../website-documentation-overhaul/index.html | 2 +- .../debezium-0-10-0-cr1-released/index.html | 2 +- .../debezium-0-10-0-cr2-released/index.html | 2 +- .../index.html | 2 +- .../debezium-0-10-0-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-1-0-0-beta1-released/index.html | 2 +- .../17/debezium-newsletter-02-2019/index.html | 2 +- .../10/22/audit-logs-with-kogito/index.html | 2 +- .../debezium-1-0-0-beta2-released/index.html | 2 +- .../debezium-1-0-0-beta3-released/index.html | 2 +- .../12/debezium-1-0-0-cr1-released/index.html | 2 +- .../12/13/externalized-secrets/index.html | 2 +- .../debezium-1-0-0-final-released/index.html | 2 +- .../debezium-1-1-alpha1-released/index.html | 2 +- .../01/22/outbox-quarkus-extension/index.html | 2 +- .../02/10/event-sourcing-vs-cdc/index.html | 2 +- .../11/debezium-1-1-beta1-released/index.html | 2 +- .../13/debezium-1-1-beta2-released/index.html | 2 +- .../19/debezium-camel-integration/index.html | 2 +- .../index.html | 2 +- blog/2020/03/05/db2-cdc-approaches/index.html | 2 +- .../03/13/debezium-1-1-c1-released/index.html | 2 +- .../index.html | 2 +- .../24/debezium-1-1-final-released/index.html | 2 +- .../31/debezium-newsletter-01-2020/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../debezium-1-2-alpha1-released/index.html | 2 +- .../07/debezium-1-2-beta1-released/index.html | 2 +- .../19/debezium-1-2-beta2-released/index.html | 2 +- 
.../11/debezium-1-2-cr1-released/index.html | 2 +- .../24/debezium-1-2-final-released/index.html | 2 +- .../debezium-1-2-1-final-released/index.html | 2 +- blog/2020/07/28/hello-debezium/index.html | 2 +- .../debezium-1-3-alpha1-released/index.html | 2 +- .../03/debezium-1-3-beta1-released/index.html | 2 +- .../15/debezium-auto-create-topics/index.html | 2 +- .../16/debezium-1-3-beta2-released/index.html | 2 +- .../24/debezium-1-3-cr1-released/index.html | 2 +- .../01/debezium-1-3-final-released/index.html | 2 +- .../index.html | 2 +- .../2020/10/22/towards-debezium-ui/index.html | 2 +- .../debezium-1-4-alpha1-released/index.html | 2 +- blog/2020/10/27/hello-debezium/index.html | 2 +- .../11/04/streaming-vitess-at-bolt/index.html | 2 +- .../debezium-1-3-1-final-released/index.html | 2 +- .../debezium-1-4-alpha2-released/index.html | 2 +- .../09/debezium-1-4-beta1-released/index.html | 2 +- .../index.html | 2 +- .../17/debezium-1-4-cr1-released/index.html | 2 +- .../2021/01/06/debezium-2020-recap/index.html | 2 +- .../07/debezium-1-4-final-released/index.html | 2 +- .../debezium-1-4-1-final-released/index.html | 2 +- .../debezium-1-5-alpha1-released/index.html | 2 +- .../24/debezium-1-5-beta1-released/index.html | 2 +- .../15/debezium-1-5-beta2-released/index.html | 2 +- .../index.html | 2 +- .../24/debezium-1-5-cr1-released/index.html | 2 +- .../08/debezium-1-5-final-released/index.html | 2 +- .../debezium-1-6-alpha1-released/index.html | 2 +- .../20/debezium-1-6-beta1-released/index.html | 2 +- .../debezium-1-5-1-final-released/index.html | 2 +- .../debezium-1-5-2-final-released/index.html | 2 +- .../10/debezium-1-6-beta2-released/index.html | 2 +- .../24/debezium-1-6-cr1-released/index.html | 2 +- .../30/debezium-1-6-final-released/index.html | 2 +- .../07/debezium-newsletter-01-2021/index.html | 2 +- .../debezium-1-7-alpha1-released/index.html | 2 +- .../08/12/introducing-debezium-ui/index.html | 2 +- .../index.html | 2 +- .../25/debezium-1-7-beta1-released/index.html | 2 +- .../index.html | 2 +- .../16/debezium-1-7-cr1-released/index.html | 2 +- .../index.html | 2 +- .../23/debezium-1-7-cr2-released/index.html | 2 +- .../04/debezium-1-7-final-released/index.html | 2 +- .../10/07/incremental-snapshots/index.html | 2 +- .../index.html | 2 +- .../debezium-1-8-alpha1-released/index.html | 2 +- .../debezium-1.8-alpha2-released/index.html | 2 +- .../11/23/debezium-ui-transforms/index.html | 2 +- .../30/debezium-1.8-beta1-released/index.html | 2 +- .../12/02/debezium-ui-topic-groups/index.html | 2 +- .../09/debezium-1.8-cr1-released/index.html | 2 +- .../12/14/note-on-log4j-security/index.html | 2 +- .../16/debezium-1.8-final-released/index.html | 2 +- .../debezium-1-9-alpha1-released/index.html | 2 +- .../debezium-1-9-alpha2-released/index.html | 2 +- .../03/debezium-1-9-beta1-released/index.html | 2 +- blog/2022/03/15/hello-debezium/index.html | 2 +- .../25/debezium-1-9-cr1-released/index.html | 2 +- .../06/debezium-1.9-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-1.9.1-final-released/index.html | 2 +- .../debezium-2.0-alpha1-released/index.html | 2 +- blog/2022/05/04/switch-to-java-11/index.html | 2 +- .../debezium-1-9-3-final-released/index.html | 2 +- .../debezium-2.0-alpha2-released/index.html | 2 +- .../debezium-1-9-4-final-released/index.html | 2 +- .../debezium-2.0-alpha2-released/index.html | 2 +- .../debezium-2.0-alpha3-released/index.html | 2 +- .../debezium-1-9-5-final-released/index.html | 2 +- .../27/debezium-2.0-beta1-released/index.html | 2 +- 
.../16/debezium-2.0-beta2-released/index.html | 2 +- .../debezium-1-9-6-final-released/index.html | 2 +- .../debezium-oracle-series-part-1/index.html | 2 +- .../debezium-oracle-series-part-2/index.html | 2 +- .../10/debezium-2.0-cr1-released/index.html | 2 +- .../17/debezium-2-0-final-released/index.html | 2 +- blog/2022/10/20/flaky-tests/index.html | 2 +- .../debezium-1-9-7-final-released/index.html | 2 +- blog/2022/10/26/debezium-evolving/index.html | 2 +- .../debezium-2-1-alpha1-released/index.html | 2 +- blog/2022/11/15/filling-the-ranks/index.html | 2 +- .../22/debezium-2-1-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-2-2-alpha1-released/index.html | 2 +- blog/2023/01/24/we-are-hiring-2/index.html | 2 +- .../debezium-2-1-2-final-released/index.html | 2 +- .../index.html | 2 +- .../debezium-2-2-alpha2-released/index.html | 2 +- .../debezium-2-2-alpha3-released/index.html | 2 +- blog/2023/03/09/hello-debezium/index.html | 2 +- .../03/debezium-2-2-beta1-released/index.html | 2 +- .../17/debezium-2-2-cr1-released/index.html | 2 +- .../20/debezium-2-2-final-released/index.html | 2 +- .../04/25/container-images-quayio/index.html | 2 +- .../index.html | 2 +- .../debezium-2-3-alpha1-released/index.html | 2 +- .../29/debezium-2-3-beta1-released/index.html | 2 +- .../12/debezium-2-3-cr1-released/index.html | 2 +- .../21/debezium-2-3-final-released/index.html | 2 +- .../towards-exactly-once-delivery/index.html | 2 +- .../index.html | 2 +- .../debezium-oracle-series-part-3/index.html | 2 +- .../index.html | 2 +- .../debezium-2-4-alpha1-released/index.html | 2 +- .../debezium-2-3-1-final-released/index.html | 2 +- .../debezium-2-3-2-final-released/index.html | 2 +- .../debezium-2-4-alpha2-released/index.html | 2 +- .../29/debezium-2-4-beta1-released/index.html | 2 +- .../debezium-2-3-3-final-released/index.html | 2 +- .../index.html | 2 +- .../13/debezium-2-4-beta2-released/index.html | 2 +- .../22/debezium-2-4-cr1-released/index.html | 2 +- .../23/flink-spark-online-learning/index.html | 2 +- .../03/debezium-2-4-final-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../debezium-2-5-alpha1-released/index.html | 2 +- .../debezium-2-5-alpha2-released/index.html | 2 +- .../debezium-2-4-1-final-released/index.html | 2 +- .../04/debezium-2-5-beta1-released/index.html | 2 +- .../14/debezium-2-5-cr1-released/index.html | 2 +- .../index.html | 2 +- .../21/debezium-2-5-final-released/index.html | 2 +- .../01/11/Debezium-and-TimescaleDB/index.html | 2 +- .../debezium-2.6-alpha1-released/index.html | 2 +- .../debezium-2-5-1-final-released/index.html | 2 +- .../debezium-2.6-alpha2-released/index.html | 2 +- .../debezium-2-5-2-final-released/index.html | 2 +- .../06/debezium-2-6-beta1-released/index.html | 2 +- .../debezium-2-5-3-final-released/index.html | 2 +- .../25/debezium-2-6-cr1-released/index.html | 2 +- .../02/debezium-2-6-final-released/index.html | 2 +- .../debezium-2-6-1-final-released/index.html | 2 +- .../debezium-2-7-alpha1-released/index.html | 2 +- .../debezium-2-7-alpha2-released/index.html | 2 +- .../debezium-2-6-2-final-released/index.html | 2 +- .../06/debezium-2-7-beta1-released/index.html | 2 +- .../01/debezium-2-7-final-released/index.html | 2 +- .../07/08/async-embedded-engine/index.html | 2 +- .../debezium-3.0-alpha1-released/index.html | 2 +- .../debezium-3.0-alpha2-released/index.html | 2 +- blog/2024/08/05/Debezium-ui-update/index.html | 2 +- .../debezium-2-7-1-final-released/index.html | 2 +- 
.../26/debezium-3.0-beta1-released/index.html | 2 +- .../debezium-2-7-2-final-released/index.html | 2 +- .../16/debezium-3-0-cr1-released/index.html | 2 +- blog/2024/09/18/quay-io-reminder/index.html | 2 +- .../debezium-2-7-3-final-released/index.html | 2 +- .../25/debezium-3-0-cr2-released/index.html | 2 +- .../02/debezium-3-0-final-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../community-feedback-survey-2024/index.html | 2 +- .../debezium-3-0-1-final-released/index.html | 2 +- .../debezium-moving-to-commonhaus/index.html | 2 +- .../debezium-3-0-2-final-released/index.html | 2 +- .../debezium-3-0-5-final-released/index.html | 2 +- blog/index.html | 2 +- blog/page/10/index.html | 2 +- blog/page/11/index.html | 2 +- blog/page/12/index.html | 2 +- blog/page/13/index.html | 2 +- blog/page/14/index.html | 2 +- blog/page/15/index.html | 2 +- blog/page/16/index.html | 2 +- blog/page/17/index.html | 2 +- blog/page/18/index.html | 2 +- blog/page/19/index.html | 2 +- blog/page/2/index.html | 2 +- blog/page/20/index.html | 2 +- blog/page/21/index.html | 2 +- blog/page/22/index.html | 2 +- blog/page/23/index.html | 2 +- blog/page/24/index.html | 2 +- blog/page/25/index.html | 2 +- blog/page/26/index.html | 2 +- blog/page/27/index.html | 2 +- blog/page/28/index.html | 2 +- blog/page/29/index.html | 2 +- blog/page/3/index.html | 2 +- blog/page/30/index.html | 2 +- blog/page/31/index.html | 2 +- blog/page/32/index.html | 2 +- blog/page/33/index.html | 2 +- blog/page/34/index.html | 2 +- blog/page/35/index.html | 2 +- blog/page/36/index.html | 2 +- blog/page/37/index.html | 2 +- blog/page/38/index.html | 2 +- blog/page/39/index.html | 2 +- blog/page/4/index.html | 2 +- blog/page/40/index.html | 2 +- blog/page/41/index.html | 2 +- blog/page/42/index.html | 2 +- blog/page/43/index.html | 2 +- blog/page/44/index.html | 2 +- blog/page/45/index.html | 2 +- blog/page/46/index.html | 2 +- blog/page/47/index.html | 2 +- blog/page/48/index.html | 2 +- blog/page/49/index.html | 2 +- blog/page/5/index.html | 2 +- blog/page/50/index.html | 2 +- blog/page/51/index.html | 2 +- blog/page/52/index.html | 2 +- blog/page/53/index.html | 2 +- blog/page/54/index.html | 2 +- blog/page/6/index.html | 2 +- blog/page/7/index.html | 2 +- blog/page/8/index.html | 2 +- blog/page/9/index.html | 2 +- community/code-of-conduct/index.html | 2 +- community/contribute/index.html | 2 +- community/index.html | 2 +- community/users/index.html | 2 +- docs/amq-streams/index.html | 2 +- docs/architecture/index.html | 2 +- docs/code-of-conduct/index.html | 2 +- docs/configuration/avro/index.html | 2 +- .../configuration/event-flattening/index.html | 2 +- docs/configuration/logging/index.html | 2 +- .../mongodb-event-flattening/index.html | 2 +- .../outbox-event-router/index.html | 2 +- docs/configuration/topic-routing/index.html | 2 +- docs/connectors/cassandra/index.html | 2 +- docs/connectors/index.html | 2 +- docs/connectors/mongodb/index.html | 2 +- docs/connectors/mysql/index.html | 2 +- docs/connectors/oracle/index.html | 2 +- docs/connectors/postgresql/index.html | 2 +- docs/connectors/sqlserver/index.html | 2 +- docs/contribute/index.html | 2 +- docs/embedded/index.html | 2 +- docs/faq/index.html | 2 +- docs/features/index.html | 2 +- docs/index.html | 2 +- docs/install/development/index.html | 2 +- docs/install/postgres-plugins/index.html | 2 +- docs/install/stable/index.html | 2 +- docs/monitoring/index.html | 2 +- docs/mysql/index.html | 2 +- docs/online-resources/index.html | 2 +- 
docs/openshift/index.html | 2 +- docs/releases/index.html | 2 +- docs/roadmap/index.html | 2 +- docs/tutorial/index.html | 2 +- documentation/architecture/index.html | 2 +- documentation/faq/index.html | 2 +- documentation/features/index.html | 2 +- documentation/index.html | 2 +- documentation/install/stable/index.html | 2 +- documentation/online-resources/index.html | 2 +- documentation/sitemap.xml | 1130 ++++++++--------- feed.xml | 2 +- foundation/faq/index.html | 2 +- index.html | 2 +- license/index.html | 2 +- releases/0.10/index.html | 2 +- releases/0.10/release-notes.html | 2 +- releases/0.9/index.html | 2 +- releases/0.9/release-notes.html | 2 +- releases/1.0/index.html | 2 +- releases/1.0/release-notes.html | 2 +- releases/1.1/index.html | 2 +- releases/1.1/release-notes.html | 2 +- releases/1.2/index.html | 2 +- releases/1.2/release-notes.html | 2 +- releases/1.3/index.html | 2 +- releases/1.3/release-notes.html | 2 +- releases/1.4/index.html | 2 +- releases/1.4/release-notes.html | 2 +- releases/1.5/index.html | 2 +- releases/1.5/release-notes.html | 2 +- releases/1.6/index.html | 2 +- releases/1.6/release-notes.html | 2 +- releases/1.7/index.html | 2 +- releases/1.7/release-notes.html | 2 +- releases/1.8/index.html | 2 +- releases/1.8/release-notes.html | 2 +- releases/1.9/index.html | 2 +- releases/1.9/release-notes.html | 2 +- releases/2.0/index.html | 2 +- releases/2.0/release-notes.html | 2 +- releases/2.1/index.html | 2 +- releases/2.1/release-notes.html | 2 +- releases/2.2/index.html | 2 +- releases/2.2/release-notes.html | 2 +- releases/2.3/index.html | 2 +- releases/2.3/release-notes.html | 2 +- releases/2.4/index.html | 2 +- releases/2.4/release-notes.html | 2 +- releases/2.5/index.html | 2 +- releases/2.5/release-notes.html | 2 +- releases/2.6/index.html | 2 +- releases/2.6/release-notes.html | 2 +- releases/2.7/index.html | 2 +- releases/2.7/release-notes.html | 2 +- releases/3.0/index.html | 2 +- releases/3.0/release-notes.html | 2 +- releases/index.html | 2 +- releases/page/10/index.html | 2 +- releases/page/11/index.html | 2 +- releases/page/12/index.html | 2 +- releases/page/13/index.html | 2 +- releases/page/14/index.html | 2 +- releases/page/15/index.html | 2 +- releases/page/16/index.html | 2 +- releases/page/17/index.html | 2 +- releases/page/18/index.html | 2 +- releases/page/19/index.html | 2 +- releases/page/2/index.html | 2 +- releases/page/20/index.html | 2 +- releases/page/21/index.html | 2 +- releases/page/22/index.html | 2 +- releases/page/23/index.html | 2 +- releases/page/24/index.html | 2 +- releases/page/25/index.html | 2 +- releases/page/26/index.html | 2 +- releases/page/27/index.html | 2 +- releases/page/28/index.html | 2 +- releases/page/29/index.html | 2 +- releases/page/3/index.html | 2 +- releases/page/30/index.html | 2 +- releases/page/31/index.html | 2 +- releases/page/32/index.html | 2 +- releases/page/33/index.html | 2 +- releases/page/34/index.html | 2 +- releases/page/35/index.html | 2 +- releases/page/36/index.html | 2 +- releases/page/37/index.html | 2 +- releases/page/38/index.html | 2 +- releases/page/39/index.html | 2 +- releases/page/4/index.html | 2 +- releases/page/40/index.html | 2 +- releases/page/41/index.html | 2 +- releases/page/42/index.html | 2 +- releases/page/43/index.html | 2 +- releases/page/44/index.html | 2 +- releases/page/45/index.html | 2 +- releases/page/46/index.html | 2 +- releases/page/47/index.html | 2 +- releases/page/48/index.html | 2 +- releases/page/49/index.html | 2 +- releases/page/5/index.html | 2 
+- releases/page/50/index.html | 2 +- releases/page/51/index.html | 2 +- releases/page/52/index.html | 2 +- releases/page/53/index.html | 2 +- releases/page/54/index.html | 2 +- releases/page/6/index.html | 2 +- releases/page/7/index.html | 2 +- releases/page/8/index.html | 2 +- releases/page/9/index.html | 2 +- roadmap/index.html | 2 +- tag/analytics/index.html | 2 +- tag/announcement discussion survey/index.html | 2 +- tag/announcement/index.html | 2 +- tag/apache kafka/index.html | 2 +- tag/apache-kafka/index.html | 2 +- tag/apicurio/index.html | 2 +- tag/avro/index.html | 2 +- tag/aws/index.html | 2 +- tag/batch/index.html | 2 +- tag/caassandra/index.html | 2 +- tag/camel/index.html | 2 +- tag/cassandra/index.html | 2 +- tag/cdc/index.html | 2 +- tag/channels/index.html | 2 +- tag/community-stories/index.html | 2 +- tag/community/index.html | 2 +- tag/connectors/index.html | 2 +- tag/containers/index.html | 2 +- tag/cqrs/index.html | 2 +- tag/custom/index.html | 2 +- tag/datalake/index.html | 2 +- tag/db2/index.html | 2 +- tag/ddd/index.html | 2 +- tag/debezium-server/index.html | 2 +- tag/debezium-ui/index.html | 2 +- tag/debezium/index.html | 2 +- tag/deduplication/index.html | 2 +- tag/discussion/index.html | 2 +- tag/docker/index.html | 2 +- tag/elasticsearch/index.html | 2 +- tag/event-sourcing/index.html | 2 +- tag/exactly-once-semantics/index.html | 2 +- tag/example/index.html | 2 +- tag/examples/index.html | 2 +- tag/features/index.html | 2 +- tag/fedora/index.html | 2 +- tag/flink/index.html | 2 +- tag/hiring/index.html | 2 +- tag/ibmi/index.html | 2 +- tag/iceberg/index.html | 2 +- tag/images/index.html | 2 +- tag/index.html | 2 +- tag/informix/index.html | 2 +- tag/integration/index.html | 2 +- tag/introduction/index.html | 2 +- tag/jaeger/index.html | 2 +- tag/jdbc/index.html | 2 +- tag/json/index.html | 2 +- tag/kafka streams/index.html | 2 +- tag/kafka-streams/index.html | 2 +- tag/kafka/index.html | 2 +- tag/kogito/index.html | 2 +- tag/ksql/index.html | 2 +- tag/kubernetes/index.html | 2 +- tag/lakehouse/index.html | 2 +- tag/machine-learning/index.html | 2 +- tag/mariadb/index.html | 2 +- tag/metrics/index.html | 2 +- tag/microservices/index.html | 2 +- tag/mongo/index.html | 2 +- tag/mongodb/index.html | 2 +- tag/monitoring/index.html | 2 +- tag/mysql/index.html | 2 +- tag/news/index.html | 2 +- tag/newsletter/index.html | 2 +- tag/notifications/index.html | 2 +- tag/online-learning/index.html | 2 +- tag/operator/index.html | 2 +- tag/oracle/index.html | 2 +- tag/outbox/index.html | 2 +- tag/performance/index.html | 2 +- tag/postgres/index.html | 2 +- tag/presentation/index.html | 2 +- tag/production/index.html | 2 +- tag/quarkus/index.html | 2 +- tag/questdb/index.html | 2 +- tag/rds/index.html | 2 +- tag/releases/index.html | 2 +- tag/schema/index.html | 2 +- tag/scylla/index.html | 2 +- tag/secrets/index.html | 2 +- tag/sentry/index.html | 2 +- tag/serialization/index.html | 2 +- tag/signaling/index.html | 2 +- tag/smt/index.html | 2 +- tag/snapshots/index.html | 2 +- tag/spanner/index.html | 2 +- tag/spark/index.html | 2 +- tag/sql/index.html | 2 +- tag/sqlserver/index.html | 2 +- tag/tensorflow/index.html | 2 +- tag/testcontainers/index.html | 2 +- tag/tests/index.html | 2 +- tag/time series/index.html | 2 +- tag/timescaledb/index.html | 2 +- tag/topics/index.html | 2 +- tag/tracing/index.html | 2 +- tag/transactions/index.html | 2 +- tag/ui/index.html | 2 +- tag/vagrant/index.html | 2 +- tag/vitess/index.html | 2 +- tag/website/index.html | 2 +- 624 files 
changed, 1188 insertions(+), 1188 deletions(-) diff --git a/404.html b/404.html index d1412a1e8e..52d37fcc03 100644 --- a/404.html +++ b/404.html @@ -1 +1 @@ -

Page Not Found

Sorry, but the page you were trying to view does not exist.



It looks like you may have found a broken link!
Please report it mentioning which page you came from and which link you clicked on by filing a bug here.

You can also fix the problem yourself by sending a pull request to GitHub:
github.com/debezium/debezium.github.io

\ No newline at end of file +

Page Not Found

Sorry, but the page you were trying to view does not exist.



It looks like you may have found a broken link!
Please report it mentioning which page you came from and which link you clicked on by filing a bug here.

You can also fix the problem yourself by sending a pull request to GitHub:
github.com/debezium/debezium.github.io

\ No newline at end of file diff --git a/archives/index.html b/archives/index.html index dba2ff06da..2b06d3aa93 100644 --- a/archives/index.html +++ b/archives/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/10/index.html b/archives/page/10/index.html index acee04ecbc..0d5b59bb41 100644 --- a/archives/page/10/index.html +++ b/archives/page/10/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/11/index.html b/archives/page/11/index.html index 15a690346a..b17fccb54b 100644 --- a/archives/page/11/index.html +++ b/archives/page/11/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/12/index.html b/archives/page/12/index.html index e8e92dadbf..3dfb2d0d6f 100644 --- a/archives/page/12/index.html +++ b/archives/page/12/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/13/index.html b/archives/page/13/index.html index 4cee847d02..e60606e69b 100644 --- a/archives/page/13/index.html +++ b/archives/page/13/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/14/index.html b/archives/page/14/index.html index fbf3ec359b..018146b046 100644 --- a/archives/page/14/index.html +++ b/archives/page/14/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/15/index.html b/archives/page/15/index.html index 01347b44d4..4809a2cc47 100644 --- a/archives/page/15/index.html +++ b/archives/page/15/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/16/index.html b/archives/page/16/index.html index 452d79b9b6..9fe0f4551c 100644 --- a/archives/page/16/index.html +++ b/archives/page/16/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/17/index.html b/archives/page/17/index.html index 75948a4c0c..7b76defa86 100644 --- a/archives/page/17/index.html +++ b/archives/page/17/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/18/index.html b/archives/page/18/index.html index a78c4afe64..febcb94b57 100644 --- a/archives/page/18/index.html +++ b/archives/page/18/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/19/index.html b/archives/page/19/index.html index ed451c414d..79914ce5b2 100644 --- a/archives/page/19/index.html +++ b/archives/page/19/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/2/index.html b/archives/page/2/index.html index e151a3f14c..31c7650f75 100644 --- a/archives/page/2/index.html +++ b/archives/page/2/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/20/index.html b/archives/page/20/index.html index a3cf7507e5..85bc2f46a3 100644 --- a/archives/page/20/index.html +++ b/archives/page/20/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/21/index.html b/archives/page/21/index.html index b0134357a0..77c900d85c 100644 --- a/archives/page/21/index.html +++ b/archives/page/21/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/22/index.html b/archives/page/22/index.html index fdfd109cbf..21e311ad4d 100644 --- a/archives/page/22/index.html +++ b/archives/page/22/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/23/index.html b/archives/page/23/index.html index ded6566ad5..4ee743f2e8 100644 --- a/archives/page/23/index.html +++ b/archives/page/23/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/24/index.html b/archives/page/24/index.html index 4979cbbad3..5d99e54da2 100644 --- a/archives/page/24/index.html +++ b/archives/page/24/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file diff --git a/archives/page/25/index.html b/archives/page/25/index.html index 88763e5327..1fec145f6f 100644 --- a/archives/page/25/index.html +++ b/archives/page/25/index.html @@ -1 +1 @@ - Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file + Archive

2024

December 1

September 5

August 4

July 3

June 1

May 2

April 3

March 3

February 2

January 3

2023

November 2

August 3

March 2

2022

November 2

October 6

July 3

June 3

May 1

March 3

January 1

2021

June 3

May 4

April 1

February 2

January 3

2020

December 3

November 3

August 1

July 2

June 2

May 2

2019

November 1

September 3

August 1

June 5

April 2

March 2

January 1

2018

November 1

June 1

February 1

2017

December 2

November 2

October 1

September 2

August 1

June 1

April 2

March 2

2016

December 1

November 1

October 3

September 2

August 4

July 1

June 2

May 1

April 2

March 1

\ No newline at end of file
diff --git a/blog/2016/03/18/Debezium-0-1-Released/index.html b/blog/2016/03/18/Debezium-0-1-Released/index.html
index 3691e23f08..5dfbeec0f9 100644
--- a/blog/2016/03/18/Debezium-0-1-Released/index.html
+++ b/blog/2016/03/18/Debezium-0-1-Released/index.html
@@ -1 +1 @@
- Debezium 0.1 Released

Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Now the good news — Debezium 0.1 is now available and includes several significant features:

  • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

  • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter-weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state. A minimal sketch of this embedded approach appears right after this list.
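
To make the embedded option more concrete, here is a minimal sketch of an application that wires the MySQL connector into its own process through the embedded engine. It is only an illustration: the class and builder method names (Configuration, EmbeddedEngine, using, notifying) follow how the embedded engine is described in later Debezium documentation and may not match the 0.1 API exactly, and every connector setting shown is a placeholder value.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import io.debezium.config.Configuration;
    import io.debezium.embedded.EmbeddedEngine;

    public class EmbeddedChangeConsumer {
        public static void main(String[] args) {
            // All connector settings here are placeholders; adjust them for a real MySQL server.
            Configuration config = Configuration.create()
                    .with("name", "inventory-connector")
                    .with("connector.class", "io.debezium.connector.mysql.MySqlConnector")
                    .with("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore")
                    .with("offset.storage.file.filename", "/tmp/offsets.dat")
                    .with("database.hostname", "localhost")
                    .with("database.port", "3306")
                    .with("database.user", "debezium")
                    .with("database.password", "dbz")
                    .with("database.server.id", "85744")
                    .with("database.server.name", "my-app-connector")
                    .build();

            // The engine calls this back for every change event it reads from the database log.
            EmbeddedEngine engine = EmbeddedEngine.create()
                    .using(config)
                    .notifying(record -> System.out.println(record))
                    .build();

            // The engine is a Runnable, so the application decides which thread runs it.
            ExecutorService executor = Executors.newSingleThreadExecutor();
            executor.execute(engine);

            // To shut down cleanly: engine.stop(); executor.shutdown();
        }
    }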

Although Debezium is really intended to be used as a turnkey service, all of Debezium’s JARs and other artifacts are available in Maven Central. Detailed information about the features, tasks, and bugs is outlined in our release notes.

To make it easier to use a Debezium connector inside your own Kafka Connect service, we created a plugin archive (in both zip and tar.gz formats) that includes all JARs used by the connector not already included in Kafka Connect 0.9.0.1. Simply download it, extract it into your Kafka Connect 0.9.0.1 installation, and add all of the JARs to the service’s classpath. Once the service is restarted, you can then use the REST API to configure and manage connector instances that monitor the databases of your choice. The MySQL connector plugin archive is located in Maven Central, so it’s even possible to use Maven to build a customized Kafka Connect service. We’ll generate these plugins for future connectors, too.
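
As an illustration of that REST-based registration step, the sketch below posts a connector configuration to a Kafka Connect worker from Java. The POST /connectors endpoint on port 8083 is the standard Kafka Connect REST API; the host names and connector properties in the JSON body are placeholder values, and some property names follow later Debezium releases rather than the 0.1 connector, so check the connector documentation for the exact settings.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class RegisterConnector {
        public static void main(String[] args) throws Exception {
            // Placeholder configuration; property names may differ in Debezium 0.1.
            String payload = "{"
                    + "\"name\": \"inventory-connector\","
                    + "\"config\": {"
                    + "\"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\","
                    + "\"database.hostname\": \"mysql\","
                    + "\"database.port\": \"3306\","
                    + "\"database.user\": \"debezium\","
                    + "\"database.password\": \"dbz\","
                    + "\"database.server.id\": \"184054\","
                    + "\"database.server.name\": \"dbserver1\""
                    + "}"
                    + "}";

            // POST the connector configuration to the Kafka Connect worker's REST API.
            URL url = new URL("http://localhost:8083/connectors");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setRequestProperty("Content-Type", "application/json");
            conn.setDoOutput(true);
            try (OutputStream out = conn.getOutputStream()) {
                out.write(payload.getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("Kafka Connect responded with HTTP " + conn.getResponseCode());
        }
    }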

The Debezium platform has a lot of moving parts in Zookeeper, Kafka, and Kafka Connect. To make it much easier for you to try it out and play with it, we created Docker images and a tutorial that walks you through using Debezium. First, it has you use Docker to start a container for each of these services and a MySQL server with an example "inventory" database. It shows you how to use the RESTful API to register a connector to monitor the inventory database, how to watch the streams of data changes for various tables, and how changing the database produces new change events with very low latency. It also walks you through shutting down the Kafka Connect service, changing data while the service is not monitoring the database, and then restarting the Kafka Connect service to see how all of the data changes that occurred while the service was not running are still captured correctly in the streams. This tutorial really is a great way to interactively learn the basics of Debezium and change data capture.

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Gitter, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve the MySQL connector and add more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue. We plan to release 0.2 very soon with at least one additional connector.

Thanks to Emmanuel, Chris, Akshath, James, and Paul for their help with the release, questions, and discussions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium Website

As you may have noticed, we have a new website with documentation, a blog, and information about the Debezium community and how you can contribute. Let us know what you think, and contribute improvements.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

{ LIKE old_tbl_name | (LIKE old_tbl_name) }
create_definition:
  ...

The CREATE literal was already consumed before our parseCreateTable begins, so it first tries to consume the TEMPORARY literal if available, then the TABLE literal, then the IF NOT EXISTS fragment if available, and then consumes and parses the qualified name of the table. If the statement includes LIKE otherTable, it uses the databaseTables (which is the reference to our Tables object) to overwrite the definition of the named table with that of the referenced table. Otherwise, it obtains an editor for the new table, and then (like the grammar rules) parses a list of create_definition fragments, followed by table_options, partition_options, and possibly a select_statement.

Take a look at the full MySqlDdlParser class to see far more details.

Wrap up

This post goes into some detail about why the MySQL connector uses the DDL statements in the binlog, though we only scratched the surface about how the connector does the DDL parsing with its framework, and how that can be reused in future parsers for other DBMS dialects.

Try our tutorial to see the MySQL connector in action, and stay tuned for more connectors, releases, and news.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

{"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"}],"optional":false,"name":"inventory.customers/pk"},"payload":{"id":1001}} {"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"},{"type":"string","optional":false,"field":"first_name"},{"type":"string","optional":false,"field":"last_name"},{"type":"string","optional":false,"field":"email"}],"optional":false,"name":"inventory.customers"},"payload":{"id":1001,"first_name":"Sally","last_name":"Thomas","email":"sally.thomas@acme.com"}}
{"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"}],"optional":false,"name":"inventory.customers/pk"},"payload":{"id":1002}} {"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"},{"type":"string","optional":false,"field":"first_name"},{"type":"string","optional":false,"field":"last_name"},{"type":"string","optional":false,"field":"email"}],"optional":false,"name":"inventory.customers"},"payload":{"id":1002,"first_name":"George","last_name":"Bailey","email":"gbailey@foobar.com"}}
{"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"}],"optional":false,"name":"inventory.customers/pk"},"payload":{"id":1003}} {"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"},{"type":"string","optional":false,"field":"first_name"},{"type":"string","optional":false,"field":"last_name"},{"type":"string","optional":false,"field":"email"}],"optional":false,"name":"inventory.customers"},"payload":{"id":1003,"first_name":"Edward","last_name":"Walker","email":"ed@walker.com"}}
{"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"}],"optional":false,"name":"inventory.customers/pk"},"payload":{"id":1004}} {"schema":{"type":"struct","fields":[{"type":"int32","optional":false,"field":"id"},{"type":"string","optional":false,"field":"first_name"},{"type":"string","optional":false,"field":"last_name"},{"type":"string","optional":false,"field":"email"}],"optional":false,"name":"inventory.customers"},"payload":{"id":1004,"first_name":"Anne","last_name":"Kretchmar","email":"annek@noanswer.org"}}

What happened? When we started Debezium’s MySQL connector, it started reading the binary replication log from the MySQL server, and it replayed all of the history and generated an event for each INSERT, UPDATE, and DELETE operation (though in our sample inventory database we only had INSERTs). If we or some client apps were to commit other changes to the database, Debezium would see those immediately and write them to the correct topic. In other words, Debezium records all of the changes to our MySQL database as events in Kafka topics! And from there, any tool, connector, or service can independently consume those event streams from Kafka and process them or put them into a different database, into Hadoop, Elasticsearch, a data grid, and so on.
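If you want to poke at these streams yourself, any ordinary Kafka consumer works; here is a minimal sketch using the console consumer that ships with Kafka, assuming the inventory.customers topic shown above (on the Kafka versions of this era the tool connects through Zookeeper, while newer versions use --bootstrap-server instead):

# replay the customers change events from the beginning of the topic
bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic inventory.customers --from-beginning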

Cleanup

If you want to delete the connector, simply issue a REST request to remove it:

curl -i -X DELETE -H "Accept:application/json" http://localhost:8083/connectors/inventory-connector

Christian Posta

Christian is a Principal Middleware Architect at Red Hat, and an enthusiast of open-source software, Apache, Cloud, Integration, Kubernetes, Docker, OpenShift, and Fabric8.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium 0.2.1 Released

I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

What happened to 0.2.0? Well, we released it to Maven Central before we’d noticed a few problems that we thought it best to fix right away. Thus 0.2.1 was born.

Installing the MySQL connector

If you’ve already installed Zookeeper, Kafka, and Kafka Connect, then using Debezium’s MySQL connector is easy. Simply download the connector’s plugin archive, extract the JARs into your Kafka Connect environment, and add the directory with the JARs to Kafka Connect’s classpath. Restart your Kafka Connect process to pick up the new JARs.
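Once the worker is back up, a quick sanity check against the REST API confirms the restart went cleanly; a small sketch, assuming the default REST port of 8083:

# the root resource reports the version of the running Kafka Connect worker
curl -s -H "Accept:application/json" http://localhost:8083/
# an empty array here simply means no connector instances have been registered yet
curl -s -H "Accept:application/json" http://localhost:8083/connectors/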

If immutable containers are your thing, then check out Debezium’s Docker images for Zookeeper, Kafka, and Kafka Connect with the MySQL connector already pre-installed and ready to go. Our tutorial even walks you through using these images, and this is a great way to learn what Debezium is all about. You can even run Debezium on Kubernetes and OpenShift.

Using the MySQL connector

To use the connector to produce change events for a particular MySQL server or cluster, simply create a configuration file for the MySQL Connector and use the Kafka Connect REST API to add that connector to your Kafka Connect cluster. When the connector starts, it will grab a consistent snapshot of the databases in your MySQL server and start reading the MySQL binlog, producing events for every inserted, updated, and deleted row. The connector can optionally produce events with the DDL statements that were applied, and you can even choose to produce events for a subset of the databases and tables. Optionally ignore, mask, or truncate columns that are sensitive, too large, or not needed. See the MySQL connector’s documentation for all the details.
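For illustration, registering such a connector might look roughly like this; the host names, credentials, and server name are placeholders, and the property names follow the connector documentation of this era, so check the docs for the exact options supported by your version:

curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" http://localhost:8083/connectors/ -d '{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz",
    "database.server.id": "184054",
    "database.server.name": "dbserver1",
    "database.whitelist": "inventory",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory"
  }
}'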

Using the libraries

Although Debezium is really intended to be used as a turnkey service, all of Debezium’s JARs and other artifacts are available in Maven Central. You might want to use our MySQL DDL parser from our MySQL connector library to parse those DDL statements in your consumers.

We do provide a small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must manage and maintain all state normally kept inside Kafka’s distributed and replicated logs. It’s perfect for use in tests, and with careful consideration it may be useful in some applications.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium 0.2.2 Released

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and it eliminates the possibility that a poorly-timed connector crash could cause the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.
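Picking up the refreshed images locally is just a matter of pulling the updated tags; the image name below is the one used in the tutorial, and the same applies to any other debezium/* images you may be running:

docker pull debezium/connect:0.2
docker pull debezium/connect:latest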

Thanks to Chris, Christian, Konstantin, James, and Bhupinder for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium 0.2.3 Released

I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Capturing changes from MySQL

Change data capture is a hot topic. Debezium’s goal is to make change data capture easy for multiple DBMSes, but admittedly we’re still a young open source project and so far we’ve only released a connector for MySQL, with a connector for MongoDB just around the corner. So it’s great to see how others are using and implementing change data capture. In this post, we’ll review Yelp’s approach and see how it is strikingly similar to Debezium’s MySQL connector.

Streaming data at Yelp

The Yelp Engineering Blog recently began a series describing their real-time streaming data infrastructure. The first post provides a good introduction and explains how moving from a monolith to a service-oriented architecture increased productivity, but also made it more challenging to work with data spread across the 100 services that own it. It’s totally worth your time to read it right now.

As Justin writes in the post, several reasons prompted them to create their own real time streaming data pipeline:

  • Ensuring that data remains consistent across services is always a difficult task, but it is especially so when things can and do go wrong. Transactions across services may be useful in some situations, but they’re not straightforward, are expensive, and can lead to request amplification where one service calls another, which coordinates with two others, etc.

  • Services that update data in multiple backend services suffer from the dual write problem, where a failure occurs after one backing service was updated but before the other could be updated, resulting in data inconsistencies that are difficult to track down and correct.

  • Combining and integrating data spread across multiple services can also be difficult and expensive, but it is even harder when that data is continuously changing. One approach is to use bulk APIs, but these can be prohibitive to create, can result in inconsistencies, and pose real scalability problems when services need to continually receive the never-ending updates to data.

Yelp’s Real-Time Data Pipeline records changes to data on totally ordered distributed logs so that downstream consumers can receive and process the same changes in exactly the same order. Services can consume changes made by other services, and can therefore stay in sync without explicit interservice communication. This system uses among other things Kafka for event logs, a homegrown system named MySQLStreamer to capture committed changes to MySQL tables, Avro for message format and schemas, and a custom Schematizer service that tracks consumers and enforces the Avro schemas used for messages on every Kafka topic.

How Yelp captures MySQL changes

Perhaps most interesting for Debezium is how Yelp captures the committed changes in their MySQL databases and writes them to Kafka topics. Their second post in the series goes into a lot more detail about their MySQLStreamer process, which reads the MySQL binary log and continuously processes the DDL statements and DML operations that appear in the log, generating the corresponding insert, update, delete, and refresh events, and writing these event messages to a separate Kafka topic for each MySQL table. We’ve mentioned before that MySQL’s row-level binlog events that result from DML operations don’t include the full definition of the columns, so knowing what the columns mean in each event requires processing the DDL statements that also appear in the binlog. Yelp uses a separate MySQL instance it calls the schema tracker database, which behaves like a MySQL slave to which only the DDL statements read from the binlog are applied. This technique lets Yelp’s MySQLStreamer system know the state of the database schema and the structure of its tables at the point in the binlog where it is processing events. This is pretty interesting, because it uses the MySQL engine itself to handle the DDL parsing.

Yelp’s MySQLStreamer process uses another MySQL database to track internal state describing its position in the binlog, what events have been successfully published to Kafka, and, because the binlog position varies on each replica, replica-independent information about each transaction. This latter information is similar to MySQL GTIDs, although Yelp is using earlier versions of MySQL that do not support GTIDs.

Of course, special consideration has to be taken for databases that have been around for a long time. The MySQL binlogs are capped and will not contain the entire history of the databases, so Yelp’s MySQLStreamer process bootstraps change data capture for older databases by starting another clean MySQL replica that uses the built-in MySQL replication mechanism together with the MySQL blackhole storage engine: the replica obtains a consistent snapshot of the master and logs all activity in its own binlog, while actually storing no data.

Yelp’s MySQLStreamer mechanism is quite ingenious in its use of MySQL and multiple extra databases to capture changes from MySQL databases and write them to Kafka topics. The downside, of course, is that doing so does increase the operational complexity of the system.

Similar purpose, similar approach

Debezium is an open source project that is building change data capture connectors for a variety of DBMSes. Like Yelp’s MySQLStreamer, Debezium’s MySQL connector can continuously capture the committed changes to MySQL database rows and record these events in a separate Kafka topic for each table. When first started, Debezium’s MySQL connector can perform an initial consistent snapshot and then begin reading the MySQL binlog. It uses both the DDL and DML operations that appear in the binlog, directly parsing the DDL statements to track changes to each table’s structure and to interpret each insert, update, and delete binlog event. And each resulting change event written to Kafka includes information about the originating MySQL server and its binlog position, as well as the before and/or after states of the affected row.

However, unlike Yelp’s MySQLStreamer, the Debezium MySQL connector doesn’t need or use extra MySQL databases to parse DDL or to store the connector’s state. Instead, Debezium is built on top of Kafka Connect, which is a new Kafka library that provides much of the generic functionality of reliably pulling data from external systems, pushing it into Kafka topics, and tracking what data has already been processed. Kafka Connect stores this state inside Kafka itself, simplifying the operational footprint. Debezium’s MySQL connector can then focus on the details of performing a consistent snapshot when required, reading the binlog, and converting the binlog events into useful change events.
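
For illustration, here is a rough sketch of the kind of configuration the MySQL connector takes, in Kafka Connect’s standalone properties format; the host names, credentials, server name, and topic names below are placeholders rather than values from this post:

# hypothetical Debezium MySQL connector configuration (standalone worker mode)
name=inventory-connector
connector.class=io.debezium.connector.mysql.MySqlConnector
# connection details for the MySQL server being captured (placeholders)
database.hostname=mysql
database.port=3306
database.user=debezium
database.password=dbz
# unique server id and the logical name used to prefix the per-table Kafka topics
database.server.id=184054
database.server.name=dbserver1
# Kafka topic where the connector records the history of DDL changes it has seen
database.history.kafka.bootstrap.servers=kafka:9092
database.history.kafka.topic=schema-changes.inventory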

Yelp’s real-time data pipeline makes use of a custom Avro schema registry, and uses those Avro schemas to encode each event into a compact binary representation while keeping the metadata about the structure of the event. It’s possible to do this with Debezium, too: simply run Confluent’s Schema Registry as a service and then configure the Kafka Connect worker to use the Avro converter. As the converter serializes each event, it looks at the structure defined by the connector and, when that structure changes, generates an updated Avro schema and registers it with the Schema Registry. That new Avro schema is then used to encode the event (and others with an identical structure) into a compact binary form written to Kafka. And of course, consumers then also use the same Avro converter so that, as events are deserialized, the converter coordinates with the Schema Registry whenever it needs an Avro schema it doesn’t know about. As a result, the events are stored compactly while their content and metadata remain available, and the Schema Registry captures and maintains the history of each table’s Avro schema as it evolves over time.
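
Assuming the Schema Registry is reachable at a placeholder address, the relevant Kafka Connect worker settings might look roughly like this (a sketch, not values taken from this post):

# serialize Debezium event keys and values with Avro, backed by a Schema Registry
key.converter=io.confluent.connect.avro.AvroConverter
key.converter.schema.registry.url=http://schema-registry:8081
value.converter=io.confluent.connect.avro.AvroConverter
value.converter.schema.registry.url=http://schema-registry:8081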

Capturing changes from MySQL with Debezium

If you’re interested in change data capture with MySQL (or any other DBMS), give Debezium a try by going through our tutorial, which walks you through starting Kafka, Kafka Connect, and Debezium’s MySQL connector to see exactly what change data events look like and how they can be used. Best of all, it’s open source, with a growing community of developers that has had the benefit of building on top of the recently created Kafka Connect framework. Our MySQL connector is ready now, and we’re working on connectors for other DBMSes. Specifically, our upcoming 0.3 release will include our MongoDB connector, with 0.4 including connectors for PostgreSQL and/or Oracle.

Correction: A previous version of this post incorrectly stated that Yelp was using a MySQL version that did support GTIDs, when in fact they are using a version that does not support MySQL GTIDs. The post has been corrected, and the author regrets the mistake.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/08/16/Debezium-0-2-4-Released/index.html b/blog/2016/08/16/Debezium-0-2-4-Released/index.html index 3043b51665..cf139c5c94 100644 --- a/blog/2016/08/16/Debezium-0-2-4-Released/index.html +++ b/blog/2016/08/16/Debezium-0-2-4-Released/index.html @@ -1 +1 @@ - Debezium 0.2.4 Released

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3, will include a new MongoDB connector, and will support Kafka Connect 0.10.0.1.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.2.4 Released

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3, will include a new MongoDB connector, and will support Kafka Connect 0.10.0.1.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/08/16/Debezium-0-3-0-Released/index.html b/blog/2016/08/16/Debezium-0-3-0-Released/index.html index e76fb2fc3b..1bca73f6e2 100644 --- a/blog/2016/08/16/Debezium-0-3-0-Released/index.html +++ b/blog/2016/08/16/Debezium-0-3-0-Released/index.html @@ -1 +1 @@ - Debezium 0.3.0 Released

After a few weeks’ delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.0 Released

After a few weeks’ delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/08/30/Debezium-0-3-1-Released/index.html b/blog/2016/08/30/Debezium-0-3-1-Released/index.html index e3b82d3660..a40eca638c 100644 --- a/blog/2016/08/30/Debezium-0-3-1-Released/index.html +++ b/blog/2016/08/30/Debezium-0-3-1-Released/index.html @@ -1 +1 @@ - Debezium 0.3.1 Released

We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.1 Released

We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/09/19/Serializing-Debezium-events-with-Avro/index.html b/blog/2016/09/19/Serializing-Debezium-events-with-Avro/index.html index 5000cff5a7..2b14209429 100644 --- a/blog/2016/09/19/Serializing-Debezium-events-with-Avro/index.html +++ b/blog/2016/09/19/Serializing-Debezium-events-with-Avro/index.html @@ -1,3 +1,3 @@ Serializing Debezium events with Avro

Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.

Kafka serializers and deserializers

Before we get too far, let’s back up and review how Kafka producers and consumers normally do this serialization and deserialization. Because the keys and values are simple opaque byte arrays, you can use anything for your keys and values. For example, consider a case where we’re using simple whole numbers for the keys and strings for the values. Here, a producer of these messages would use a long serializer to convert the long keys to binary form and a string serializer to convert the String values to binary form. Meanwhile, the consumers use a long deserializer to convert the binary keys into usable long values, and a string deserializer to convert the binary values back into String objects.
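
As a sketch of that scenario, the producer and consumer would be configured with Kafka’s built-in (de)serializers along these lines; the class names come from the org.apache.kafka.common.serialization package, and everything else here is illustrative:

# producer: whole-number keys and string values
key.serializer=org.apache.kafka.common.serialization.LongSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer
# consumer: the matching deserializers turn the bytes back into longs and Strings
key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer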

In cases where the keys and/or values need to be a bit more structured, the producers and consumers can be written to use JSON structures for keys and/or values, and the Kafka-provided JSON serializer and deserializer to do the conversion to and from the binary form stored within the Kafka messages. As we said earlier, using JSON for keys and/or values is very flexible and language agnostic, but it also produces keys and values that are relatively large, since the fields and structure of the JSON values need to be encoded as well.

Avro serialization

Avro is a data serialization mechanism that uses a schema to define the structure of data. Avro relies upon this schema when writing the data to the binary format, and the schema allows it to encode the fields within the data in a much more compact form. Avro relies upon the schema when reading the data, too. But interestingly, Avro schemas are designed to evolve, so it is actually possible to use a slightly different schema for reading than what was used for writing. This feature makes Avro a great choice for Kafka serialization and deserialization.

Confluent provides a Kafka serializer and deserializer that uses Avro and a separate Schema Registry, and it works like this: when a numeric or string object is to be serialized, the Avro serializer will determine the corresponding Avro schema for the given type, register that schema and the topic it’s used on with the Schema Registry, get back the schema’s unique identifier, and then write out a binary form containing the schema identifier followed by the Avro-encoded value. The next message is likely to have the same type and thus schema, so the serializer can quickly encode the schema identifier and value for this message without having to talk to the Schema Registry. Only when needing to serialize a schema it hasn’t already seen does the Avro serializer talk with the Schema Registry. So not only is this fast, but it also produces very compact binary forms and allows the producer to evolve its key and/or value schemas over time. The Schema Registry can also be configured to allow new versions of schemas to be registered only when they are compatible with the Avro schema evolution rules, ensuring that producers do not produce messages that consumers will not be able to read.

Consumers, meanwhile, use the Avro deserializer, which works in a similar manner, albeit backwards: when it reads the binary form of a key or value, it first looks for the schema identifier and, if it hasn’t seen it before, asks the Schema Registry for the schema, and then uses that schema to decode the remainder of the binary representation into its object form. Again, if the deserializer has previously seen a particular schema identifier, it already has the schema needed to decode the data and doesn’t have to consult the Schema Registry.
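
Putting the two halves together, a producer and consumer using Confluent’s Avro (de)serializers are typically configured along these lines; the registry URL is a placeholder:

# producer: Avro-encode keys and values, registering new schemas as needed
key.serializer=io.confluent.kafka.serializers.KafkaAvroSerializer
value.serializer=io.confluent.kafka.serializers.KafkaAvroSerializer
schema.registry.url=http://schema-registry:8081
# consumer: the matching deserializer fetches unknown schemas from the same registry
key.deserializer=io.confluent.kafka.serializers.KafkaAvroDeserializer
value.deserializer=io.confluent.kafka.serializers.KafkaAvroDeserializer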

Kafka Connect converters

Kafka Connect is a bit different from many Kafka producers/consumers, since the keys and values will often be structured. And rather than require connectors to work with JSON objects, Kafka Connect defines its own lightweight framework for defining data structures with a schema, making it much easier to write connectors that work with structured data. Kafka Connect also defines its own converters that are similar to Kafka (de)serializers, except that Kafka Connect’s converters know about these structures and schemas and can serialize the keys and values to binary form. Kafka Connect provides a JSON converter that converts the structures into JSON and then uses the normal Kafka JSON serializer, so downstream consumers can just use the normal Kafka JSON deserializer and get a JSON representation of the Kafka Connect structs and schema. This is exactly what the Debezium tutorial is using, and the watch-topic consumer knows to use the JSON deserializer.
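
For reference, that JSON setup corresponds roughly to worker settings like the following; the schemas.enable flags control whether the Kafka Connect schema is embedded alongside each JSON payload, and the exact values here are illustrative:

key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# embed the schema in each message so consumers can interpret the structure
key.converter.schemas.enable=true
value.converter.schemas.enable=true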

One great feature of Kafka Connect is that the connectors simply provide the structured messages, and Kafka Connect takes care of serializing them using the configured converter. This means that you can use any Kafka Connect converters with any Kafka Connect connector, including all of Debezium’s connectors.

Kafka Connect’s schema system was designed specifically with Avro in mind, so there is a one-to-one mapping between Kafka Connect schemas and Avro schemas. Confluent provides an Avro Converter for Kafka Connect that serializes the Kafka Connect structs provided by the connectors into the compact Avro binary representation, again using the Schema Registry just like the Avro serializer. The consumer just uses the normal Avro deserializer as mentioned above.

Using Avro for serialization of Debezium events brings several significant advantages:

  1. The encoded binary forms of the Debezium events are significantly smaller than the JSON representations. Not only is the structured data encoded in a more compact form, but the schema associated with that structured data is represented in the binary form as a single integer.

  2. Encoding the Debezium events into their Avro binary forms is fast. Only when the converter sees a new schema does it have to consult with the Schema Registry; otherwise, the schema has already been seen and its encoding logic already precomputed.

  3. The Avro Converter for Kafka Connect produces messages with Avro-encoded keys and values that can be read by any Kafka consumers using the Avro deserializer.

  4. Debezium event structures are based upon the structure of the table from which the changes were captured. When the structure of the source table changes (e.g., because an ALTER statement was applied to it), the structure and schema of the events will also change. If this is done in a manner such that the new Avro schema is compatible with the older Avro schema, then consumers will be able to process the events without disruption, even though the event structures evolve over time.

  5. Avro’s schema mechanism is far more formal and rigorous than the free-form JSON structure, and the changes in the schemas are clearly identified when comparing any two messages.

  6. The Avro converter, Avro (de)serializers, and Schema Registry are all open source.

It is true that using the Avro converter and deserializer requires a running Schema Registry, and that the registry becomes an integral part of your streaming infrastructure. However, this is a small price to pay for the benefits listed above.

Using the Avro Converter with Debezium

As mentioned above, in the interest of keeping the Debezium tutorial as simple as possible, we avoid using the Schema Registry or the Avro converter in the tutorial. We also don’t (yet) include the Avro converter in our Docker images, though that will change soon.

Nevertheless, it is absolutely possible to use the Avro Converter with the Debezium connectors when you are installing the connectors into either the Confluent Platform or into your own installation of Kafka Connect. Simply configure the Kafka Connect workers to use the Avro converter for the keys and values:

key.converter=io.confluent.connect.avro.AvroConverter
 value.converter=io.confluent.connect.avro.AvroConverter

And, if you want to use the Avro Converter for Kafka Connect internal messages, then set these as well:

internal.key.converter=io.confluent.connect.avro.AvroConverter
-internal.value.converter=io.confluent.connect.avro.AvroConverter

Once again, there is no need to configure the Debezium connectors any differently.
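
One detail worth noting: the Avro converter also needs to know where the Schema Registry is running, which (assuming a placeholder address) is typically configured with properties such as:

key.converter.schema.registry.url=http://schema-registry:8081
value.converter.schema.registry.url=http://schema-registry:8081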

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +internal.value.converter=io.confluent.connect.avro.AvroConverter

Once again, there is no need to configure the Debezium connectors any differently.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/09/26/Debezium-0-3-2-Released/index.html b/blog/2016/09/26/Debezium-0-3-2-Released/index.html index 47a030ba78..ef24e3ba9f 100644 --- a/blog/2016/09/26/Debezium-0-3-2-Released/index.html +++ b/blog/2016/09/26/Debezium-0-3-2-Released/index.html @@ -1 +1 @@ - Debezium 0.3.2 Released

We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.2 Released

We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/10/18/Debezium-0-3-3-Released/index.html b/blog/2016/10/18/Debezium-0-3-3-Released/index.html index c19e3f8875..60ea0aaacf 100644 --- a/blog/2016/10/18/Debezium-0-3-3-Released/index.html +++ b/blog/2016/10/18/Debezium-0-3-3-Released/index.html @@ -1 +1 @@ - Debezium 0.3.3 Released

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.3 Released

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/10/19/Support-for-MySQL-JSON-typpe-coming-soon/index.html b/blog/2016/10/19/Support-for-MySQL-JSON-typpe-coming-soon/index.html index 2524a4b298..91df597428 100644 --- a/blog/2016/10/19/Support-for-MySQL-JSON-typpe-coming-soon/index.html +++ b/blog/2016/10/19/Support-for-MySQL-JSON-typpe-coming-soon/index.html @@ -1 +1 @@ - Support for MySQL's JSON type coming soon

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is also possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is really a very powerful feature in MySQL.

Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

We’d like to thank Stanley Shyiko for guiding us and helping us debug the final problems with parsing JSON in the binlog, for accepting our proposed changes into his library, for releasing his library quickly when needed, and for being so responsive on this and other issues!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Support for MySQL's JSON type coming soon

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is also possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is really a very powerful feature in MySQL.

Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

We’d like to thank Stanley Shyiko for guiding us and helping us debug the final problems with parsing JSON in the binlog, for accepting our proposed changes into his library, for releasing his library quickly when needed, and for being so responsive on this and other issues!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/10/25/Debezium-0-3-4-Released/index.html b/blog/2016/10/25/Debezium-0-3-4-Released/index.html index 937c9c923a..bb5ac964b5 100644 --- a/blog/2016/10/25/Debezium-0-3-4-Released/index.html +++ b/blog/2016/10/25/Debezium-0-3-4-Released/index.html @@ -1 +1 @@ - Debezium 0.3.4 Released

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.4 Released

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/11/14/Debezium-0-3-5-Released/index.html b/blog/2016/11/14/Debezium-0-3-5-Released/index.html index 2bc85b1415..640198c341 100644 --- a/blog/2016/11/14/Debezium-0-3-5-Released/index.html +++ b/blog/2016/11/14/Debezium-0-3-5-Released/index.html @@ -1 +1 @@ - Debezium 0.3.5 Released

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector could stop in the middle of a transaction, and when the connector restarted it would begin with the next transaction and therefore might fail to capture some of the change events from the earlier transaction. This release fixes this issue so that, when restarting, the connector will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.5 Released

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector could stop before completing all of the updates in a transaction, and when the connector restarted it began with the next transaction, so it might fail to capture some of the change events in the earlier transaction. This release fixes the issue so that upon restart the connector always picks up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2016/12/21/Debezium-0-3-6-Released/index.html b/blog/2016/12/21/Debezium-0-3-6-Released/index.html index d2a9566d2a..7996a3e0c2 100644 --- a/blog/2016/12/21/Debezium-0-3-6-Released/index.html +++ b/blog/2016/12/21/Debezium-0-3-6-Released/index.html @@ -1 +1 @@ - Debezium 0.3.6 Released

We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes for the MySQL and MongoDB connectors. See the release notes for specifics on these changes.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB and MySQL connectors, and we also have a great PostgreSQL connector that is nearly ready to be released. With the new connector we’ll switch release numbers to 0.4.x and plan to stop issuing 0.3.x releases. Stay tuned for this next 0.4.0 release!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.3.6 Released

We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes for the MySQL and MongoDB connectors. See the release notes for specifics on these changes.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB and MySQL connectors, and we also have a great PostgreSQL connector that is nearly ready to be released. With the new connector we’ll switch release numbers to 0.4.x and plan to stop issuing 0.3.x releases. Stay tuned for this next 0.4.0 release!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/02/07/Debezium-0-4-0-Released/index.html b/blog/2017/02/07/Debezium-0-4-0-Released/index.html index cb904593a9..ac1d7ca7ec 100644 --- a/blog/2017/02/07/Debezium-0-4-0-Released/index.html +++ b/blog/2017/02/07/Debezium-0-4-0-Released/index.html @@ -1 +1 @@ - Debezium 0.4.0 Released

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes for the MongoDB and MySQL connectors, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.4.x releases. We’re also going to work on a few new connectors, though we’ll likely increase the minor version with each new connector. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.4.0 Released

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes for the MongoDB and MySQL connectors, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.4.x releases. We’re also going to work on a few new connectors, though we’ll likely increase the minor version with each new connector. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/02/08/Support-for-Postgresql/index.html b/blog/2017/02/08/Support-for-Postgresql/index.html index 07f795cc34..276bb5aa9d 100644 --- a/blog/2017/02/08/Support-for-Postgresql/index.html +++ b/blog/2017/02/08/Support-for-Postgresql/index.html @@ -1 +1 @@ - PostgreSQL support added to Debezium

With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

Make sure you read the connector documentation for an in-depth look at the different configuration options.

Getting started

The fastest way to check out the new connector is to use Debezium’s Postgres Docker image, which is based on a vanilla Postgres Docker image on top of which it compiles and installs a PostgreSQL logical decoding plugin and sets up the necessary permissions for streaming changes locally (on localhost).

Once you fire up the Docker machine with the database server, starting up and configuring the connector to stream changes from that machine is exactly the same as described in detail by the Debezium tutorial. The only obvious difference is that instead of the MySQL machine and MySQL connector configuration you need to use the PostgreSQL machine and the PostgreSQL connector configuration parameters.

Using the connector in your own environment

Unlike the Mongo and MySQL connectors, getting the PostgreSQL connector up and running is a bit more complicated, because it requires a server-side logical decoding plugin running in the PostgreSQL server.

In general, there are three major steps involved in getting the connector running in your environment:

  1. Compiling and installing the logical decoding plugin into your own server

  2. Setting up the PostgreSQL server with appropriate replication permissions

  3. Starting the Kafka Connect, Broker and Zookeeper machines

For steps 1 and 2 you can check out our PostgreSQL container image together with the sources for the logical decoding plugin.

For step 3 you can either use Debezium’s Kafka container images or perform a similar setup locally. The Debezium tutorial and the connector documentation are great resources for helping out with this task.

Horia Chiorean

Horia is an open source software developer at Red Hat and is actively involved in projects like Debezium and ModeShape.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + PostgreSQL support added to Debezium

With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

Make sure you read the connector documentation for an in-depth look at the different configuration options.

Getting started

The fastest way to check out the new connector is to use Debezium’s Postgres Docker image, which is based on a vanilla Postgres Docker image on top of which it compiles and installs a PostgreSQL logical decoding plugin and sets up the necessary permissions for streaming changes locally (on localhost).
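
For example, a throwaway database container for experimenting can be started roughly like this (the image tag and the credentials below are just illustrative placeholders; check the image’s documentation for the tags that are actually published):

# Start a PostgreSQL server from the Debezium image; the logical decoding
# plugin is already compiled and installed inside the image.
# (image tag and credentials are examples only)
docker run -d --name postgres -p 5432:5432 \
  -e POSTGRES_USER=postgres \
  -e POSTGRES_PASSWORD=postgres \
  debezium/postgres:9.6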

Once you fire up the Docker machine with the database server, starting up and configuring the connector to stream changes from that machine is exactly the same as described in detail by the Debezium tutorial. The only obvious difference is that instead of the MySQL machine and MySQL connector configuration you need to use the PostgreSQL machine and the PostgreSQL connector configuration parameters.
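
As a rough sketch, and assuming a Kafka Connect worker listening on its default REST port, registering a PostgreSQL connector instance looks something like the following (the connector name, host names, credentials, and logical server name are placeholder values; see the connector documentation for the authoritative list of options):

# Register a PostgreSQL connector instance with the Kafka Connect REST API
# (all values below are examples only)
curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors -d '{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "database.hostname": "postgres",
    "database.port": "5432",
    "database.user": "postgres",
    "database.password": "postgres",
    "database.dbname": "postgres",
    "database.server.name": "dbserver1"
  }
}'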

Using the connector in your own environment

Unlike the Mongo and MySQL connectors, getting the PostgreSQL connector up and running is a bit more complicated, because it requires a server-side logical decoding plugin running in the PostgreSQL server.

In general, there are three major steps involved in getting the connector running in your environment:

  1. Compiling and installing the logical decoding plugin into your own server

  2. Setting up the PostgreSQL server with appropriate replication permissions

  3. Starting the Kafka Connect, Broker and Zookeeper machines

For steps 1 and 2 you can check out our PostgreSQL container image together with the sources for the logical decoding plugin.

For step 3 you can either use Debezium’s Kafka container images or perform a similar setup locally. The Debezium tutorial and the connector documentation are great resources for helping out with this task.

Horia Chiorean

Horia is an open source software developer at Red Hat and is actively involved in projects like Debezium and ModeShape.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/02/22/Debezium-at-WePay/index.html b/blog/2017/02/22/Debezium-at-WePay/index.html index 90640fe510..758fc51f6b 100644 --- a/blog/2017/02/22/Debezium-at-WePay/index.html +++ b/blog/2017/02/22/Debezium-at-WePay/index.html @@ -81,4 +81,4 @@ }, "op": "u", "ts_ms": 1465581029523 -}

The serialization format that Debezium sends to Kafka is configurable. We prefer Avro at WePay for its compact size, schema DDL, performance, and rich ecosystem. We’ve configured Kafka Connect to use Confluent’s Avro encoder codec for Kafka. This encoder serializes messages to Avro, but also registers the schemas with Confluent’s schema registry.

If a MySQL table’s schema is changed, Debezium adapts to the change by updating the structure and schema of the "before" and "after" portions of its event messages. This will appear to the Avro encoder as a new schema, which it will register with the schema registry before the message is sent to Kafka. The registry runs full compatibility checks to make sure that downstream consumers don’t break due to a schema evolution.

Note that it’s still possible to make an incompatible change in the MySQL schema itself, which would break downstream consumers. We have not yet added automatic compatibility checks to MySQL table alters.

Future work

Monolithic database

In addition to our microservices, we have a legacy monolithic database that’s much larger than our microservice databases. We’re in the process of upgrading this cluster to run with GTIDs enabled. Once this is done, we plan to replicate this cluster into Kafka with Debezium as well.

Large table snapshots

We’re lucky that all of our microservice databases are of relatively manageable size. Our monolithic database has some tables that are much larger. We have yet to test Debezium with very large tables, so it’s unclear if any tuning or patches will be required in order to snapshot these tables on the initial Debezium load. We have heard community reports that larger tables (6 billion+ rows) do work, provided that the configuration exposed in DBZ-152 is set. This is work we’re planning to do shortly.

More monitoring

Kafka Connect doesn’t currently make it easy to expose metrics through the Kafka metrics framework. As a result, there are very few metrics available from the Kafka Connect framework. Debezium does expose metrics via JMX (see DBZ-134), but we aren’t exposing them to our metrics system currently. We do monitor the system, but when things go wrong, it can be difficult to determine what’s going on. KAFKA-2376 is the open JIRA that’s meant to address the underlying Kafka Connect issue.

More databases

As we add more microservice databases, we’ll begin to put pressure on the two Debezium MySQL servers that we have. Eventually, we plan to split the single Debezium cluster that we have into more than one, with some microservices replicating only to one cluster, and the rest replicating to others.

Unify compatibility checks

As I mentioned in the schema section, above, the Confluent schema registry runs schema compatibility checks out of the box right now. This makes it very easy for us to prevent backward and forward incompatible changes from making their way into Kafka. We don’t currently have an equivalent check at the MySQL layer. This is a problem because it means it’s possible for a DBA to make incompatible changes at the MySQL layer. Debezium will then fail when trying to produce the new messages into Kafka. We need to make sure this can’t happen by adding equivalent checks at the MySQL layer. DBZ-70 discusses this more.

Automatic topic configuration

We currently run Kafka with topic auto-create enabled with a default of 6 partitions, and time-based/size-based retention. This configuration doesn’t make much sense for Debezium topics. At the very least, they should be using log compaction as their retention policy. We plan to write a script that looks for misconfigured Debezium topics, and updates them to appropriate retention settings.

Conclusion

We’ve been running Debezium in production for the past 8 months. Initially, we ran it dark, and then enabled it for the realtime BigQuery pipeline shown in the architecture diagram above. Recently, we’ve begun consuming the messages in microservices and stream processing systems. We look forward to adding more data to the pipeline, and addressing some of the issues that were raised in the Future work section.

A special thanks to Randall Hauch, who has been invaluable in addressing a number of bug fixes and feature requests.

Chris Riccomini

Chris is a Principal Software Engineer at WePay.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}

The serialization format that Debezium sends to Kafka is configurable. We prefer Avro at WePay for its compact size, schema DDL, performance, and rich ecosystem. We’ve configured Kafka Connect to use Confluent’s Avro encoder codec for Kafka. This encoder serializes messages to Avro, but also registers the schemas with Confluent’s schema registry.
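
The relevant Kafka Connect worker settings look roughly like the excerpt below (the registry URL is a placeholder; the converter class and property names are those used by Confluent’s Avro converter):

# Kafka Connect worker configuration (excerpt) - example values only
key.converter=io.confluent.connect.avro.AvroConverter
key.converter.schema.registry.url=http://schema-registry:8081
value.converter=io.confluent.connect.avro.AvroConverter
value.converter.schema.registry.url=http://schema-registry:8081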

If a MySQL table’s schema is changed, Debezium adapts to the change by updating the structure and schema of the "before" and "after" portions of its event messages. This will appear to the Avro encoder as a new schema, which it will register with the schema registry before the message is sent to Kafka. The registry runs full compatibility checks to make sure that downstream consumers don’t break due to a schema evolution.
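
As an illustration, the compatibility level for a subject can be set through the schema registry’s REST API along these lines (the registry host and the subject name are placeholders):

# Require full (backward and forward) compatibility for one subject
# (host and subject name are examples only)
curl -X PUT -H "Content-Type: application/vnd.schemaregistry.v1+json" \
  --data '{"compatibility": "FULL"}' \
  http://schema-registry:8081/config/dbserver1.inventory.customers-value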

Note that it’s still possible to make an incompatible change in the MySQL schema itself, which would break downstream consumers. We have not yet added automatic compatibility checks to MySQL table alters.

Future work

Monolithic database

In addition to our microservices, we have a legacy monolithic database that’s much larger than our microservice databases. We’re in the process of upgrading this cluster to run with GTIDs enabled. Once this is done, we plan to replicate this cluster into Kafka with Debezium as well.

Large table snapshots

We’re lucky that all of our microservice databases are of relatively manageable size. Our monolithic database has some tables that are much larger. We have yet to test Debezium with very large tables, so it’s unclear if any tuning or patches will be required in order to snapshot these tables on the initial Debezium load. We have heard community reports that larger tables (6 billion+ rows) do work, provided that the configuration exposed in DBZ-152 is set. This is work we’re planning to do shortly.

More monitoring

Kafka Connect doesn’t currently make it easy to expose metrics through the Kafka metrics framework. As a result, there are very few metrics available from the Kafka Connect framework. Debezium does expose metrics via JMX (see DBZ-134), but we aren’t exposing them to our metrics system currently. We do monitor the system, but when things go wrong, it can be difficult to determine what’s going on. KAFKA-2376 is the open JIRA that’s meant to address the underlying Kafka Connect issue.

More databases

As we add more microservice databases, we’ll begin to put pressure on the two Debezium MySQL servers that we have. Eventually, we plan to split the single Debezium cluster that we have into more than one, with some microservices replicating only to one cluster, and the rest replicating to others.

Unify compatibility checks

As I mentioned in the schema section, above, the Confluent schema registry runs schema compatibility checks out of the box right now. This makes it very easy for us to prevent backward and forward incompatible changes from making their way into Kafka. We don’t currently have an equivalent check at the MySQL layer. This is a problem because it means it’s possible for a DBA to make incompatible changes at the MySQL layer. Debezium will then fail when trying to produce the new messages into Kafka. We need to make sure this can’t happen by adding equivalent checks at the MySQL layer. DBZ-70 discusses this more.

Automatic topic configuration

We currently run Kafka with topic auto-create enabled with a default of 6 partitions, and time-based/size-based retention. This configuration doesn’t make much sense for Debezium topics. At the very least, they should be using log compaction as their retention policy. We plan to write a script that looks for misconfigured Debezium topics, and updates them to appropriate retention settings.
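
For instance, switching an existing Debezium topic over to log compaction can be done with the stock Kafka tooling, roughly as sketched below (the topic name is a placeholder; newer Kafka versions use --bootstrap-server instead of --zookeeper):

# Change the cleanup policy of one Debezium topic to log compaction
# (topic name and ZooKeeper address are examples only)
bin/kafka-configs.sh --zookeeper localhost:2181 --alter \
  --entity-type topics --entity-name dbserver1.inventory.customers \
  --add-config cleanup.policy=compact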

Conclusion

We’ve been running Debezium in production for the past 8 months. Initially, we ran it dark, and then enabled it for the realtime BigQuery pipeline shown in the architecture diagram above. Recently, we’ve begun consuming the messages in microservices and stream processing systems. We look forward to adding more data to the pipeline, and addressing some of the issues that were raised in the Future work section.

A special thanks to Randall Hauch, who has been invaluable in addressing a number of bug fixes and feature requests.

Chris Riccomini

Chris is a Principal Software Engineer at WePay.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/03/17/Debezium-0-4-1-Released/index.html b/blog/2017/03/17/Debezium-0-4-1-Released/index.html index 945e656c1c..affc8704ab 100644 --- a/blog/2017/03/17/Debezium-0-4-1-Released/index.html +++ b/blog/2017/03/17/Debezium-0-4-1-Released/index.html @@ -1 +1 @@ - Debezium 0.4.1 Released

We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

What’s next

Kafka 0.10.2.0 is out, so we plan to release 0.5.0 next week with all of the changes/fixes in 0.4.1 but with support for Kafka 0.10.2.0. We’ll then continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.5.x releases. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.4.1 Released

We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

What’s next

Kafka 0.10.2.0 is out, so we plan to release 0.5.0 next week with all of the changes/fixes in 0.4.1 but with support for Kafka 0.10.2.0. We’ll then continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.5.x releases. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/03/27/Debezium-0-5-0-Released/index.html b/blog/2017/03/27/Debezium-0-5-0-Released/index.html index ae5691b4b4..70e9c4ee18 100644 --- a/blog/2017/03/27/Debezium-0-5-0-Released/index.html +++ b/blog/2017/03/27/Debezium-0-5-0-Released/index.html @@ -1 +1 @@ - Debezium 0.5.0 Released

We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connector processes them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.

We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.5.x releases with fixes. And we’re still working on connectors for SQL Server and Oracle. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.5.0 Released

We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connector processes them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.
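
As a small illustration, the connector configuration excerpt below (properties format, example names only) chains in a single transform, the RegexRouter that ships with Kafka Connect, to rewrite three-part topic names such as server.database.table so that only the last segment is kept:

# Connector configuration excerpt (properties format) - example names only.
# The transform rewrites a three-part topic name like "server.database.table"
# so that only the table name remains.
transforms=route
transforms.route.type=org.apache.kafka.connect.transforms.RegexRouter
transforms.route.regex=([^.]+)\\.([^.]+)\\.([^.]+)
transforms.route.replacement=$3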

Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.

We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

What’s next

We’ll continue to improve the MongoDB, MySQL, and PostgreSQL connectors and push out 0.5.x releases with fixes. And we’re still working on connectors for SQL Server and Oracle. Stay tuned and get involved!

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/04/26/Debezium-evolving/index.html b/blog/2017/04/26/Debezium-evolving/index.html index 845091389f..b7cc7e5762 100644 --- a/blog/2017/04/26/Debezium-evolving/index.html +++ b/blog/2017/04/26/Debezium-evolving/index.html @@ -1 +1 @@ - Debezium Evolving

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play in using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture could be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each was initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and wrote blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who’s recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and will be an excellent lead for the Debezium community.

Will the Debezium project change? To some degree it will always continue to evolve just as it has from the very beginning, and that’s a healthy thing. But a lot is staying the same. Red Hat remains committed to the Debezium project, and will continue its sponsorship and community-oriented governance that has worked so well from the beginning. And just as importantly, we the community are still here and will continue building the best open source CDC connectors.

So keep up the great work, and look for and take advantage of opportunities to become more involved in Debezium. Please give a warm welcome to Gunnar by introducing yourself in the developer and / or user chat rooms and mention how you’re using Debezium and what the Debezium community means to you.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium Evolving

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play in using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture could be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each was initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and wrote blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who’s recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and will be an excellent lead for the Debezium community.

Will the Debezium project change? To some degree it will always continue to evolve just as it has from the very beginning, and that’s a healthy thing. But a lot is staying the same. Red Hat remains committed to the Debezium project, and will continue its sponsorship and community-oriented governance that has worked so well from the beginning. And just as importantly, we the community are still here and will continue building the best open source CDC connectors.

So keep up the great work, and look for and take advantage of opportunities to become more involved in Debezium. Please give a warm welcome to Gunnar by introducing yourself in the developer and / or user chat rooms and mention how you’re using Debezium and what the Debezium community means to you.

Randall Hauch

Randall is an open source software developer at Red Hat, and has been working in data integration for almost 20 years. He is the founder of Debezium and has worked on several other open source projects. He lives in Edwardsville, IL, near St. Louis.

     


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/04/27/hello-debezium/index.html b/blog/2017/04/27/hello-debezium/index.html index 95863de5fd..9cecf46db6 100644 --- a/blog/2017/04/27/hello-debezium/index.html +++ b/blog/2017/04/27/hello-debezium/index.html @@ -1 +1 @@ - Hello Debezium!

When I first learned about the Debezium project last year, I was very excited about it right away.

I could see how this project would be very useful for many people out there and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, the overall idea of creating a diverse eco-system of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming web site and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve been previously working on and also tie in with ideas I’ve been pursuing around CQRS, event sourcing and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM - a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Currently I am serving as the lead of the Bean Validation 2.0 specification (JSR 380) as well as its reference implementation Hibernate Validator. Two other projects close to my heart are MapStruct - a code generator for bean-to-bean mappings - and ModiTect, which is tooling for Java 9 modules and their descriptors. In general, I’m a strong believer in the idea of open source, and I just love working with folks from all over the world to create useful tools and libraries.

Joining the Debezium community and working on change data capture is a great next step. There are so many things to do: connectors for Oracle, SQL Server and Cassandra, but also things like an entity join processor which would allow stepping from row-level events to more aggregated business-level events (e.g. for updating a combined search index for an order and its order lines), or tooling for managing and visualizing histories of event schema changes.

One thing I’d like to emphasize is that the project’s direction generally isn’t going to change very much. Red Hat is fully committed to maintaining and evolving the project together with you, the Debezium community. The ride really has just begun!

Finally, let me say a huge thank you to Randall for his excellent work! You’ve been a true role model for going from an idea, to pitching it - within Red Hat as well as within the wider community - to building a steadily growing and evolving project. It’s stating the obvious, but there wouldn’t be a Debezium without you. Thanks for everything, and I’m looking forward very much to working with you and the community on this great project!

Onwards,

--Gunnar

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Hello Debezium!

When I first learned about the Debezium project last year, I was very excited about it right away.

I could see how this project would be very useful for many people out there and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, the overall idea of creating a diverse eco-system of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming web site and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve previously been working on and also tie in with ideas I’ve been pursuing around CQRS, event sourcing and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM - a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Currently I am serving as the lead of the Bean Validation 2.0 specification (JSR 380) as well as of its reference implementation, Hibernate Validator. Two other projects close to my heart are MapStruct - a code generator for bean-to-bean mappings - and ModiTect, which is tooling for Java 9 modules and their descriptors. In general, I’m a strong believer in the idea of open source, and I just love working with folks from all over the world to create useful tools and libraries.

Joining the Debezium community and working on change data capture is a great next step. There are so many things to do: connectors for Oracle, SQL Server and Cassandra, but also things like an entity join processor which would allow stepping from row-level events to more aggregated business-level events (e.g. for updating a combined search index for an order and its order lines), or tooling for managing and visualizing histories of event schema changes.

One thing I’d like to emphasize is that the project’s direction generally isn’t going to change very much. Red Hat is fully committed to maintaining and evolving the project together with you, the Debezium community. The ride really has just begun!

Finally, let me say a huge thank you to Randall for his excellent work! You’ve been a true role model for going from an idea, to pitching it - within Red Hat as well as within the wider community - to building a steadily growing and evolving project. It’s stating the obvious, but there wouldn’t be a Debezium without you. Thanks for everything, and I’m looking forward very much to working with you and the community on this great project!

Onwards,

--Gunnar

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/06/12/debezium-0-5-1-released/index.html b/blog/2017/06/12/debezium-0-5-1-released/index.html index 87d4e04399..87dec81d44 100644 --- a/blog/2017/06/12/debezium-0-5-1-released/index.html +++ b/blog/2017/06/12/debezium-0-5-1-released/index.html @@ -1 +1 @@ - Debezium 0.5.1 Released

It’s my pleasure to announce the release of Debezium 0.5.1!

This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0; upgrading is recommended for all users.

Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

Please refer to the changelog for an overview of all the 29 issues fixed in Debezium 0.5.1.

The Docker image containing Kafka Connect and all the Debezium 0.5.x connectors as well as the image containing Postgres and the Debezium logical decoding plug-in have been updated to 0.5.1, too.

As Debezium continues to evolve and grow, the number of people contributing to the project is also going up. The following people have sent in pull requests for this release: Aaron Rosenberg, Alexander Kukushkin, Brendan Maguire, Duncan Sands, David Leibovic, Jiri Pechanec, nacivida, Omar Al-Safi, Randall Hauch and Tom Bentley.

Thanks a lot to you and everyone else in the community contributing via feature requests, bug reports, discussions and questions!

What’s next

We’ve planned to do further bug fix releases for the 0.5.x line. Specifically, we’ll release a fix for DBZ-217 shortly, which is about the MySQL connector stumbling when coming across a corrupt event in the binlog.

In parallel we’re looking into Debezium connectors for SQL Server and Oracle. While we cannot promise anything yet in terms of when these will be ready to be published, we hope to have at least one of them ready some time soon. Stay tuned and get involved!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.5.1 Released

It’s my pleasure to announce the release of Debezium 0.5.1!

This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0; upgrading is recommended for all users.

Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.
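
To illustrate what such filters look like, here is a minimal sketch of a MySQL connector configuration in Kafka Connect properties format. The connection values and the table list below are made-up placeholders; please double-check the exact property names against the connector documentation for your version.

  # Hypothetical MySQL connector registration (all values are placeholders)
  name=inventory-connector
  connector.class=io.debezium.connector.mysql.MySqlConnector
  database.hostname=mysql
  database.port=3306
  database.user=debezium
  database.password=dbz
  database.server.id=184054
  database.server.name=dbserver1
  database.history.kafka.bootstrap.servers=kafka:9092
  database.history.kafka.topic=schema-changes.inventory
  # Only tables matching this filter are captured; as of this release the
  # filter is applied to MySQL system tables as well (DBZ-242)
  table.whitelist=inventory.customers,inventory.orders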

Please refer to the changelog for an overview of all the 29 issues fixed in Debezium 0.5.1.

The Docker image containing Kafka Connect and all the Debezium 0.5.x connectors as well as the image containing Postgres and the Debezium logical decoding plug-in have been updated to 0.5.1, too.

As Debezium continues to evolve and grow, the number of people contributing to the project is also going up. The following people have sent in pull requests for this release: Aaron Rosenberg, Alexander Kukushkin, Brendan Maguire, Duncan Sands, David Leibovic, Jiri Pechanec, nacivida, Omar Al-Safi, Randall Hauch and Tom Bentley.

Thanks a lot to you and everyone else in the community contributing via feature requests, bug reports, discussions and questions!

What’s next

We’ve planned to do further bug fix releases for the 0.5.x line. Specifically, we’ll release a fix for DBZ-217 shortly, which is about the MySQL connector stumbling when coming across a corrupt event in the binlog.

In parallel we’re looking into Debezium connectors for SQL Server and Oracle. While we cannot promise anything yet in terms of when these will be ready to be published, we hope to have at least one of them ready some time soon. Stay tuned and get involved!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/08/17/debezium-0-5-2-is-out/index.html b/blog/2017/08/17/debezium-0-5-2-is-out/index.html index d38e88ada5..46a57d8e07 100644 --- a/blog/2017/08/17/debezium-0-5-2-is-out/index.html +++ b/blog/2017/08/17/debezium-0-5-2-is-out/index.html @@ -1 +1 @@ - Debezium 0.5.2 Is Out

I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

  • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

  • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

  • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

  • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

Speaking about the Docker images, we’ve set up nightly tags for the Debezium images on Docker Hub, allowing you to grab the latest improvements even before an official release has been cut. The connector archives are also deployed to the Sonatype OSS Maven repository.

Finally, we’ve spent some time to extend the documentation on some things not covered before:

  • Avro Serialization describes how to use the Avro converter and the Confluent Schema Registry instead of the default JSON converter for serializing change events, resulting in much smaller message sizes; the Avro converter itself has also been added to the Debezium Docker image for Kafka Connect, so you can use it right away

  • Topic Routing describes how to use Debezium’s ByLogicalTableRouter single message transformation (SMT) for routing the change events from multiple tables into a single topic, which for instance is very useful when working with sharded tables

Please refer to the changelog for an overview of all the 19 issues fixed in Debezium 0.5.2.

The following people from the community have sent in pull requests for this release: Emrul Islam, Eric S. Kreiser, Mario Mueller, Matteo Capitanio, Omar Al-Safi and Satyajit Vegesna.

Thanks a lot to you and everyone else in the community for contributing to Debezium via feature requests, bug reports, discussions and questions!

What’s next

The next version of Debezium will be 0.6 (planned for September). This release is planned to bring the upgrade to Kafka 0.11. We’ll also look into an SMT for transforming the change events emitted by Debezium into a flat representation, which for instance will be very useful in conjunction with the JDBC sink connector.

While 0.6 is planned to be more of a "stabilization release", 0.7 should bring a long-awaited major feature: we’ve planned to explore support for Oracle and hopefully will do an initial release of a Debezium connector for that database.

In other words, exciting times are ahead! If you’d like to get involved, let us know. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.5.2 Is Out

I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

  • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double); a configuration sketch follows this list.

  • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

  • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

  • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)
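
As a small illustration of the first item above, here is a sketch of how the new option could be set on a PostgreSQL connector, again in Kafka Connect properties format. The connection values are placeholders; please verify the property names against the connector documentation for your version.

  # Hypothetical PostgreSQL connector configuration (all values are placeholders)
  name=pg-inventory-connector
  connector.class=io.debezium.connector.postgresql.PostgresConnector
  database.hostname=postgres
  database.port=5432
  database.user=postgres
  database.password=postgres
  database.dbname=inventory
  database.server.name=pgserver1
  # Emit NUMERIC/DECIMAL columns as double values instead of Kafka's Decimal type (DBZ-337)
  decimal.handling.mode=double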

Speaking about the Docker images, we’ve set up nightly tags for the Debezium images on Docker Hub, allowing you to grab the latest improvements even before an official release has been cut. The connector archives are also deployed to the Sonatype OSS Maven repository.

Finally, we’ve spent some time to extend the documentation on some things not covered before:

  • Avro Serialization describes how to use the Avro converter and the Confluent Schema Registry instead of the default JSON converter for serializing change events, resulting in much smaller message sizes; the Avro converter itself has also been added to the Debezium Docker image for Kafka Connect, so you can use it right away (a sketch of the required converter settings follows this list)

  • Topic Routing describes how to use Debezium’s ByLogicalTableRouter single message transformation (SMT) for routing the change events from multiple tables into a single topic, which for instance is very useful when working with sharded tables
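
For the Avro serialization mentioned above, the relevant settings live in the Kafka Connect worker configuration rather than in the connector itself. A minimal sketch might look as follows; the schema registry host is a placeholder, and the converter class is the one provided by Confluent.

  # Hypothetical Kafka Connect worker settings for Avro serialization
  key.converter=io.confluent.connect.avro.AvroConverter
  key.converter.schema.registry.url=http://schema-registry:8081
  value.converter=io.confluent.connect.avro.AvroConverter
  value.converter.schema.registry.url=http://schema-registry:8081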

Please refer to the changelog for an overview of all the 19 issues fixed in Debezium 0.5.2.

The following people from the community have sent in pull requests for this release: Emrul Islam, Eric S. Kreiser, Mario Mueller, Matteo Capitanio, Omar Al-Safi and Satyajit Vegesna.

Thanks a lot to you and everyone else in the community for contributing to Debezium via feature requests, bug reports, discussions and questions!

What’s next

The next version of Debezium will be 0.6 (planned for September). This release is planned to bring the upgrade to Kafka 0.11. We’ll also look into an SMT for transforming the change events emitted by Debezium into a flat representation, which for instance will be very useful in conjunction with the JDBC sink connector.

While 0.6 is planned to be more of a "stabilization release", 0.7 should bring a long-awaited major feature: we’ve planned to explore support for Oracle and hopefully will do an initial release of a Debezium connector for that database.

In other words, exciting times are ahead! If you’d like to get involved, let us know. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/09/21/debezium-0-6-0-released/index.html b/blog/2017/09/21/debezium-0-6-0-released/index.html index fd543bb91f..8a2c6585cf 100644 --- a/blog/2017/09/21/debezium-0-6-0-released/index.html +++ b/blog/2017/09/21/debezium-0-6-0-released/index.html @@ -1 +1 @@ - Debezium 0.6 Is Out

What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

What’s in it?

Debezium is now built against and tested with Apache Kafka 0.11.0. Also the Debezium Docker images have been updated to that version (DBZ-305). You should make sure to read the Kafka update guide when upgrading from an earlier version.

To improve integration with existing Kafka sink connectors such as the JDBC sink connector or the Elasticsearch connector, Debezium provides a new single message transformation (DBZ-226). This SMT converts Debezium’s CDC event structure into a more conventional structure commonly used in other sink and non-CDC source connectors, where the message represents the state of the inserted or updated row, or null in the case of a deleted row. This lets you, for instance, capture the changes from a table in MySQL and update a corresponding table in a Postgres database accordingly. We’ll provide a complete example showing the usage of that new SMT in the next few days.

If you are doing the Debezium tutorial, you will like the new Docker Compose set-up provided in the examples repo (DBZ-127). This lets you start all the required Docker containers with a single command.

New connector features

Now let’s take a look at some of the changes around the specific Debezium connectors. The MySQL connector has seen multiple improvements, e.g.:

  • Snapshot consistency wasn’t guaranteed before in some corner cases (DBZ-210); that’s fixed now

  • DEC and FIXED types supported in the DDL parser (DBZ-359; thanks to Liu Hanlin!)

  • UNION clause supported for ALTER TABLE (DBZ-346)

For the MongoDB connector, the way of serializing ids into the key payload of CDC events has changed (DBZ-306). The new format allows reading ids back with the correct type. We also took the opportunity to make the id field name consistent with the other connectors, i.e. it’s "id" now. Note that this change may break existing consumers, so some work on your end may be required, depending on the implementation of your consumer. The details are discussed in the release notes, and the format of message keys is described in depth in the connector documentation. Kudos to Hans-Peter Grahsl, who contributed to this feature!

Another nice improvement for this connector is support for SSL connections (DBZ-343).

Finally, the Postgres connector learned some new tricks, too:

  • Support for variable-width numeric columns (DBZ-318)

  • Views won’t stop the connector any more (DBZ-319)

  • Warnings and notifications emitted by the server are correctly forwarded to the log (DBZ-279)

Please refer to the changelog for an overview of all the 20 issues fixed in Debezium 0.6.0.

What’s next?

High on our agenda is exploring support for Oracle (DBZ-20). We are also looking into using another logical decoding plug-in (wal2json) for the Postgres connector, which would enable using Debezium with Postgres instances running on Amazon RDS. Another feature being worked on by community member Moira Tagle is support for updates to the table.whitelist option for existing connector instances. Finally, we’ve planned to test and adapt the existing MySQL connector to provide CDC functionality for MariaDB.

Debezium 0.7 with one or more out of those features as well as hopefully some others will be released later this year. We’ll likely also do further 0.6.x releases with bug fixes as required.

You’d like to contribute? That’s great - let us know and we’ll get you started. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.6 Is Out

What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

What’s in it?

Debezium is now built against and tested with Apache Kafka 0.11.0. Also the Debezium Docker images have been updated to that version (DBZ-305). You should make sure to read the Kafka update guide when upgrading from an earlier version.

To improve integration with existing Kafka sink connectors such as the JDBC sink connector or the Elasticsearch connector, Debezium provides a new single message transformation (DBZ-226). This SMT converts Debezium’s CDC event structure into a more conventional structure commonly used in other sink and non-CDC source connectors, where the message represents the state of the inserted or updated row, or null in the case of a deleted row. This lets you, for instance, capture the changes from a table in MySQL and update a corresponding table in a Postgres database accordingly. We’ll provide a complete example showing the usage of that new SMT in the next few days.
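
Until that example is out, here is a rough sketch of how the new SMT could be enabled on a connector, using the standard Kafka Connect transforms mechanism. The class name shown below is the one we believe ships with this release; please confirm it against the Debezium documentation for your version.

  # Hypothetical snippet enabling the new flattening SMT on a connector
  transforms=unwrap
  transforms.unwrap.type=io.debezium.transforms.UnwrapFromEnvelope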

If you are doing the Debezium tutorial, you will like the new Docker Compose set-up provided in the examples repo (DBZ-127). This lets you start all the required Docker containers with a single command.

New connector features

Now let’s take a look at some of the changes around the specific Debezium connectors. The MySQL connector has seen multiple improvements, e.g.:

  • Snapshot consistency wasn’t guaranteed before in some corner cases (DBZ-210); that’s fixed now

  • DEC and FIXED types supported in the DDL parser (DBZ-359; thanks to Liu Hanlin!)

  • UNION clause supported for ALTER TABLE (DBZ-346)

For the MongoDB connector, the way of serializing ids into the key payload of CDC events has changed (DBZ-306). The new format allows reading ids back with the correct type. We also took the opportunity to make the id field name consistent with the other connectors, i.e. it’s "id" now. Note that this change may break existing consumers, so some work on your end may be required, depending on the implementation of your consumer. The details are discussed in the release notes, and the format of message keys is described in depth in the connector documentation. Kudos to Hans-Peter Grahsl, who contributed to this feature!

Another nice improvement for this connector is support for SSL connections (DBZ-343).

Finally, the Postgres connector learned some new tricks, too:

  • Support for variable-width numeric columns (DBZ-318)

  • Views won’t stop the connector any more (DBZ-319)

  • Warnings and notifications emitted by the server are correctly forwarded to the log (DBZ-279)

Please refer to the changelog for an overview of all the 20 issues fixed in Debezium 0.6.0.

What’s next?

High on our agenda is exploring support for Oracle (DBZ-20). We are also looking into using another logical decoding plug-in (wal2json) for the Postgres connector, which would enable using Debezium with Postgres instances running on Amazon RDS. Another feature being worked on by community member Moira Tagle is support for updates to the table.whitelist option for existing connector instances. Finally, we’ve planned to test and adapt the existing MySQL connector to provide CDC functionality for MariaDB.

Debezium 0.7 with one or more out of those features as well as hopefully some others will be released later this year. We’ll likely also do further 0.6.x releases with bug fixes as required.

You’d like to contribute? That’s great - let us know and we’ll get you started. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/09/25/streaming-to-another-database/index.html b/blog/2017/09/25/streaming-to-another-database/index.html index 2fed8fbb4c..2ec2501387 100644 --- a/blog/2017/09/25/streaming-to-another-database/index.html +++ b/blog/2017/09/25/streaming-to-another-database/index.html @@ -78,4 +78,4 @@ -----------+------+------------+----------------------- ... Doe | 1005 | John | john.doe@example.com -(5 rows)

Summary

We set up a simple streaming data pipeline to replicate data in near real-time from a MySQL database to a PostgreSQL database. We accomplished this using Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few SMTs — all without having to write any code. And since it is a streaming system, it will continue to capture all changes made to the MySQL database and replicate them in near real time.

What’s next?

In a future blog post we will reproduce the same scenario with Elasticsearch as a target for events.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +(5 rows)

Summary

We set up a simple streaming data pipeline to replicate data in near real-time from a MySQL database to a PostgreSQL database. We accomplished this using Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few SMTs — all without having to write any code. And since it is a streaming system, it will continue to capture all changes made to the MySQL database and replicate them in near real time.
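
For reference, the sink side of such a pipeline boils down to a fairly small configuration. The following is only a sketch with made-up connection values, using the Confluent JDBC sink connector and Debezium's flattening SMT; refer to the steps above and to the respective documentation for the authoritative settings.

  # Hypothetical JDBC sink configuration for the PostgreSQL side of the pipeline
  name=jdbc-sink
  connector.class=io.confluent.connect.jdbc.JdbcSinkConnector
  topics=customers
  connection.url=jdbc:postgresql://postgres:5432/inventory?user=postgresuser&password=postgrespw
  auto.create=true
  insert.mode=upsert
  pk.mode=record_key
  pk.fields=id
  # Flatten Debezium's change event envelope before handing rows to the sink
  transforms=unwrap
  transforms.unwrap.type=io.debezium.transforms.UnwrapFromEnvelope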

What’s next?

In a future blog post we will reproduce the same scenario with Elasticsearch as a target for events.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/10/26/debezium-0-6-1-released/index.html b/blog/2017/10/26/debezium-0-6-1-released/index.html index 6653508ee7..c0347e0b76 100644 --- a/blog/2017/10/26/debezium-0-6-1-released/index.html +++ b/blog/2017/10/26/debezium-0-6-1-released/index.html @@ -1 +1 @@ - Debezium 0.6.1 Is Released

Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

Let’s take a closer look at some of the changes.

New connector option for controlling BIGINT UNSIGNED representation

BIGINT UNSIGNED columns from MySQL databases have been represented using Kafka Connect’s Decimal type until now. This type allows representing all possible values of such columns, but it’s based on a byte array, so it can be a bit cumbersome for consumers to handle. Therefore we added a new option named bigint.unsigned.handling.mode to the MySQL connector that allows representing such columns using long.

In most cases that’s the preferable option; only if your column contains values larger than 2^63 (which MySQL doesn’t recommend due to potential value losses when performing calculations) should you stick to the Decimal representation.

Using long will be the default as of Debezium 0.7; for the 0.6.x timeline we decided to go with the previous behavior (i.e. using Decimal) for the sake of backwards compatibility.

Thanks a lot to Ben Williams who contributed this feature!

New example Docker images and Docker Compose files

In the Debezium examples repository we now provide Docker Compose files which let you run the tutorial with all three databases we currently support: MySQL, Postgres and MongoDB.

Just choose the Compose file for your preferred database and get all the required components (ZooKeeper, Apache Kafka, Kafka Connect and the database) running within a few seconds.

We’ve also deployed Docker images for Postgres and MongoDB to the Debezium organization on Docker Hub, so you’ve got some data to play with.

Version upgrades

We’ve upgraded our images from Kafka 0.11.0.0 to 0.11.0.1. Also the binlog client library used by the MySQL connector was upgraded from 0.9.0 to 0.13.0.

Bugfixes

Finally, several bugs were fixed in 0.6.1. E.g. you can now name a column "column" in MySQL (DBZ-408), generated DROP TEMP TABLE statements won’t flood the DB history topic (DBZ-295), and we’ve fixed a case where the Postgres connector would stop working due to an internal error but fail to report it via the task/connector status (DBZ-380).

Please see the full change log for more details and the complete list of fixed issues.

What’s next?

The work on Debezium 0.7 has already begun and we’ve merged the first set of changes. You can expect to see support for using the wal2json logical decoding plug-in with the Postgres connector, which will finally make it possible to use Debezium with Postgres on Amazon RDS! We’ve also started our explorations of providing a connector for Oracle (DBZ-20) and hope to report some progress here soon.

While the work on Debezium 0.7 continues, you will likely continue to see one or more 0.6.x bugfix releases. We’ve automated the release process as much as possible, making it a breeze to ship a new release and getting fixes into your hands quickly.

If you’d like to contribute, please let us know. We’re happy about any help and will work with you to get you started quickly. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.6.1 Is Released

Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

Let’s take a closer look at some of the changes.

New connector option for controlling BIGINT UNSIGNED representation

BIGINT UNSIGNED columns from MySQL databases have been represented using Kafka Connect’s Decimal type until now. This type allows representing all possible values of such columns, but it’s based on a byte array, so it can be a bit cumbersome for consumers to handle. Therefore we added a new option named bigint.unsigned.handling.mode to the MySQL connector that allows representing such columns using long.

In most cases that’s the preferable option; only if your column contains values larger than 2^63 (which MySQL doesn’t recommend due to potential value losses when performing calculations) should you stick to the Decimal representation.

Using long will be the default as of Debezium 0.7; for the 0.6.x timeline we decided to go with the previous behavior (i.e. using Decimal) for the sake of backwards compatibility.
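
If you want to opt into the new representation already on 0.6.1, the option goes straight into the MySQL connector configuration. A minimal sketch, showing only the relevant excerpt and using the value described above:

  # Hypothetical excerpt of a MySQL connector configuration
  connector.class=io.debezium.connector.mysql.MySqlConnector
  # Emit BIGINT UNSIGNED columns as 64-bit long values;
  # in 0.6.x the default remains the Decimal-based representation described above
  bigint.unsigned.handling.mode=long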

Thanks a lot to Ben Williams who contributed this feature!

New example Docker images and Docker Compose files

In the Debezium examples repository we now provide Docker Compose files which let you run the tutorial with all three databases we currently support: MySQL, Postgres and MongoDB.

Just choose the Compose file for your preferred database and get all the required components (ZooKeeper, Apache Kafka, Kafka Connect and the database) running within a few seconds.

We’ve also deployed Docker images for Postgres and MongoDB to the Debezium organization on Docker Hub, so you’ve got some data to play with.

Version upgrades

We’ve upgraded our images from Kafka 0.11.0.0 to 0.11.0.1. Also the binlog client library used by the MySQL connector was upgraded from 0.9.0 to 0.13.0.

Bugfixes

Finally, several bugs were fixed in 0.6.1. E.g. you can now name a column "column" in MySQL (DBZ-408), generated DROP TEMP TABLE statements won’t flood the DB history topic (DBZ-295), and we’ve fixed a case where the Postgres connector would stop working due to an internal error but fail to report it via the task/connector status (DBZ-380).

Please see the full change log for more details and the complete list of fixed issues.

What’s next?

The work on Debezium 0.7 has already begun and we’ve merged the first set of changes. You can expect to see support for using the wal2json logical decoding plug-in with the Postgres connector, which will finally make it possible to use Debezium with Postgres on Amazon RDS! We’ve also started our explorations of providing a connector for Oracle (DBZ-20) and hope to report some progress here soon.

While the work on Debezium 0.7 continues, you will likely continue to see one or more 0.6.x bugfix releases. We’ve automated the release process as much as possible, making it a breeze to ship a new release and getting fixes into your hands quickly.

If you’d like to contribute, please let us know. We’re happy about any help and will work with you to get you started quickly. Check out the details below on how to get in touch.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/11/11/debezium-at-devoxx-belgium/index.html b/blog/2017/11/11/debezium-at-devoxx-belgium/index.html index 3a104bcfa6..b499268ce8 100644 --- a/blog/2017/11/11/debezium-at-devoxx-belgium/index.html +++ b/blog/2017/11/11/debezium-at-devoxx-belgium/index.html @@ -1 +1 @@ - Debezium at Devoxx Belgium

Debezium’s project lead Gunnar Morling gave a few talks during the recent Devoxx Belgium 2017 conference. One of his talks was dedicated to Debezium and change data capture in general.

If you are interested in those topics and you want to obtain a fast and simple introduction to them, do not hesitate to watch the talk. Batteries and demo included!

The slide deck is available, too:

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium at Devoxx Belgium

Debezium’s project lead Gunnar Morling gave a few talks during the recent Devoxx Belgium 2017 conference. One of his talks was dedicated to Debezium and change data capture in general.

If you are interested in those topics and you want to obtain a fast and simple introduction to them, do not hesitate to watch the talk. Batteries and demo included!

The slide deck is available, too:

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2017/11/15/debezium-0-6-2-released/index.html b/blog/2017/11/15/debezium-0-6-2-released/index.html index 15d5adb576..b17de39ada 100644 --- a/blog/2017/11/15/debezium-0-6-2-released/index.html +++ b/blog/2017/11/15/debezium-0-6-2-released/index.html @@ -1 +1 @@ - Debezium 0.6.2 Is Released

We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

PostgreSQL Connector

The big news for the Postgres connector is that Debezium now runs against PostgreSQL 10 thanks to a contribution from Scofield Xu. As a part of this change we are providing a Docker Image with PostgreSQL 10, too, and we have set up a daily run of our integration tests against it.

If you are building Postgres yourself using the Debezium logical decoding plug-in, you can save quite some megabytes if you don’t need the PostGIS geometric extension: thanks to the work by Danila Kiver, it’s now possible to omit that extension.

MySQL Connector

We’ve received multiple reports related to parsing MySQL DDL statements, e.g. there were a few specific invocations of the ALTER TABLE statement which weren’t handled correctly. Those as well as a few other parser bugs have been fixed.

If you work with the TIMESTAMP column type and your Kafka Connect server isn’t using UTC as its timezone, then the fix for DBZ-260 applies to you. In that case, the ISO 8601 formatted String emitted by Debezium would previously, and incorrectly, have contained the UTC date and time plus the zone offset (as per the time zone the Kafka Connect server is located in), whereas now it will contain the date and time adjusted to the zone offset. This may require adjustments to downstream consumers if they were relying on the previous, incorrect behavior.

DBZ-217 gives you more flexibility for handling corrupt events encountered in the MySQL binlog. By default, the connector will stop at the problematic event in such a case. But you now also have the option to just log the event and its position and continue processing after it.

Another nice improvement for the MySQL connector is a much reduced CPU load after the snapshot has been completed, when using the "snapshot only" mode (DBZ-396).

MongoDB Connector

This connector received an important fix that applies when more than one thread is used to perform the initial snapshot (DBZ-438). Before, it could happen that single messages got lost during snapshotting; this is fixed now.

Examples and Docker Images

We have expanded our examples repository with an Avro example, which may be interesting to you if you’d like to work not with JSON messages but rather with the compact Avro binary format and the Confluent schema registry.

As a part of our release process we are now creating micro tags for our Docker images for every released version. While tags in the format x.y.z are fixed in time, tags in the format x.y are rolling updates and always point to the latest micro release of that image.

Please see the full change log for more details and the complete list of fixed issues.

What’s next?

The Debezium 0.7 release is planned to be out in two to three weeks from now.

It will contain the move to Apache Kafka 1.0.0 and bring support for the wal2json logical decoding plug-in for Postgres. This will eventually allow using the Debezium Postgres connector on Amazon RDS (once the correct wal2json version is available there).

In parallel, the work around handling updates to the whitelist configuration of the MySQL connector continues (it may be ready for 0.7.0), and so does the work on the Oracle connector (which will be shipping in a future release).

If you’d like to contribute, please let us know. We’re happy about any help and will work with you to get you started quickly. Check out the details below on how to get in touch.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.6.2 Is Released

We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

PostgreSQL Connector

The big news for the Postgres connector is that Debezium now runs against PostgreSQL 10 thanks to a contribution from Scofield Xu. As a part of this change we are providing a Docker Image with PostgreSQL 10, too, and we have set up a daily run of our integration tests against it.

If you are building Postgres yourself using the Debezium logical decoding plug-in, you can save quite some megabytes if you don’t need the PostGIS geometric extension: thanks to the work by Danila Kiver, it’s now possible to omit that extension.

MySQL Connector

We’ve received multiple reports related to parsing MySQL DDL statements, e.g. there were a few specific invocations of the ALTER TABLE statement which weren’t handled correctly. Those as well as a few other parser bugs have been fixed.

If you work with the TIMESTAMP column type and your Kafka Connect server isn’t using UTC as its timezone, then the fix for DBZ-260 applies to you. In that case, the ISO 8601 formatted String emitted by Debezium would previously, and incorrectly, have contained the UTC date and time plus the zone offset (as per the time zone the Kafka Connect server is located in), whereas now it will contain the date and time adjusted to the zone offset. This may require adjustments to downstream consumers if they were relying on the previous, incorrect behavior.

DBZ-217 gives you more flexibility for handling corrupt events encountered in the MySQL binlog. By default, the connector will stop at the problematic event in such a case, but you now also have the option to just log the event and its position and continue processing after it.

Another nice improvement for the MySQL connector is a much reduced CPU load after the snapshot has been completed, when using the "snapshot only" mode (DBZ-396).

MongoDB Connector

This connector received an important fix that applies when more than one thread is used to perform the initial snapshot (DBZ-438). Before, it could happen that individual messages got lost during snapshotting; this is fixed now.

Examples and Docker Images

We have expanded our examples repository with an Avro example, which may be interesting to you if you’d rather work with the compact Avro binary format and the Confluent schema registry than with JSON messages.
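
To give a rough idea of what switching to Avro involves, the converter settings in play look roughly like the following sketch; the registry URL is just a placeholder, and the exact configuration used in the example repository may differ:

{
  "key.converter": "io.confluent.connect.avro.AvroConverter",
  "key.converter.schema.registry.url": "http://schema-registry:8081",
  "value.converter": "io.confluent.connect.avro.AvroConverter",
  "value.converter.schema.registry.url": "http://schema-registry:8081"
}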

As a part of our release process we are now creating micro tags for our Docker images for every released version. While tags in the format x.y.z are fixed in time, tags in the format x.y are rolling updates and always point to the latest micro release of that image.

Please see the full change log for more details and the complete list of fixed issues.

What’s next?

The Debezium 0.7 release is planned to be out in two to three weeks from now.

It will contain the move to Apache Kafka 1.0.0 and bring support for the wal2json logical decoding plug-in for Postgres. This will eventually allow using the Debezium Postgres connector on Amazon RDS (once the correct wal2json version is available there).

In parallel, the work on handling updates to the whitelist configuration of the MySQL connector continues (it may be ready for 0.7.0), and so does the work on the Oracle connector (which will ship in a future release).

If you’d like to contribute, please let us know. We’re happy about any help and will work with you to get you started quickly. Check out the details below on how to get in touch.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

Debezium 0.7.0 Is Released

It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

Based on Apache Kafka 1.0

A few weeks ago the Apache Kafka team released version 1.0.0. This was an important milestone for the Kafka community, and we can now happily declare that Debezium is built against and runs on that Apache Kafka version. Our Docker images were also updated to contain Apache Kafka and Kafka Connect 1.0.0.

PostgreSQL Connector

The big news for the PostgreSQL connector is that it now supports the wal2json logical decoding plugin as an alternative to the existing DecoderBufs plug-in. This means that you now can use Debezium to stream changes out of PostgreSQL on Amazon RDS, as wal2json is the logical decoding plugin used in this environment. Many thanks to Robert Coup who significantly contributed to this feature.
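
As a rough sketch of what this looks like in practice (host, credentials and server name below are placeholders, not values from this post), the decoding plug-in is selected via the plugin.name property when registering the connector:

{
  "name": "inventory-postgres-connector",
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "database.hostname": "postgres",
    "database.port": "5432",
    "database.user": "postgres",
    "database.password": "postgres",
    "database.dbname": "inventory",
    "database.server.name": "dbserver1",
    "plugin.name": "wal2json"
  }
}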

Working on this plug-in, we noticed that there was a potential race condition when it comes to applying changes to the schema of captured tables. In that case it could have happened that a number of messages pertaining to data changes done before the schema change were emitted using the new schema. With the exception of a few corner cases (which are described here), this has been addressed when using Debezium’s own DecoderBufs plug-in. So it’s highly recommended to upgrade the DecoderBufs plug-in to the new version before upgrading the Debezium connector. We’ve also worked closely with the author of the wal2json plug-in (big thanks for the quick help!) to prevent the issue when using the wal2json plug-in.

While the Debezium Docker images for Postgres already come with the latest version of DecoderBufs and wal2json, RDS for now is still using an older version of wal2json. Until this has been updated, special attention must be paid when applying schema changes to captured tables. Please see the changelog for an in-depth description of this issue and ways to mitigate it.

There are new daily CI jobs that verify that the wal2json plug-in passes our test suite. For the foreseeable future we’ll support both wal2json and the existing DecoderBufs plug-in. The latter should be more efficient due to its use of the Protocol Buffers binary format, whereas the former comes in handy for RDS or other cloud environments where you don’t have control over the installed logical decoding plug-ins, but wal2json is available.

In other news on the Postgres connector, Andrey Pustovetov discovered and proposed a fix for a multi-threading bug that could have put the connector into an undefined state if a rebalance in the Connect cluster was triggered during snapshotting. Thanks, Andrey!

MySQL Connector

In the MySQL connector we’ve fixed two issues which affect the default mapping of certain column types.

Following up on the new BIGINT UNSIGNED mapping introduced in Debezium 0.6.1, this type is now encoded as int64 in Debezium messages by default, as that is easier for (polyglot) clients to work with. This is a reasonable mapping for the vast majority of cases. Only when using values > 2^63 should you switch back to the Decimal logical type, which is a bit more cumbersome to handle. This should be a rare situation, as MySQL advises against using unsigned values > 2^63 due to potential value losses when performing DB-side calculations. Please see the connector documentation for the details.

Rene Kerner has improved the support for the MySQL TIME type. MySQL allows storing values larger than 23:59:59 in such columns, and the int32 type which was previously used for TIME(0-3) columns isn’t enough to convey the entire possible value range. Therefore all TIME columns in MySQL are now represented as int64 by default, using the io.debezium.time.MicroTime logical type, i.e. the value represents microseconds. If needed, you can switch to the previous mapping by setting time.precision.mode to adaptive, but you should only do so if you’re sure that you will only ever have values that fit into int32. This option is only kept for a transition period and will be removed in a future release.
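
If you need to opt out of these new defaults, the corresponding options can be set explicitly in the MySQL connector configuration. The fragment below is only a sketch: time.precision.mode is named in this post, while bigint.unsigned.handling.mode and its precise value are taken from the connector documentation rather than from this post, so please double-check there:

{
  "bigint.unsigned.handling.mode": "precise",
  "time.precision.mode": "adaptive"
}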

Recently we got a report that MySQL’s binlog can contain ROLLBACK statements and thus transactions that are actually not committed, e.g. when temporary tables are dropped. Of course no data change messages should be emitted in this situation, so we introduced a look-ahead buffer that reads the binlog transaction by transaction and excludes those transactions that were rolled back. This feature should be considered incubating and is disabled by default for the time being. We’d like to gather your feedback on this, so if you’d benefit from this feature, please give it a try and let us know if you run into any issues. For further details please refer to the binlog.buffer.size setting in the MySQL connector docs.
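
As a sketch, enabling the look-ahead buffer comes down to giving it a positive size in the connector configuration; the value below is purely illustrative:

{
  "binlog.buffer.size": "8192"
}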

Andras Istvan Nagy came up with the idea and implemented a way to explicitly select the rows from each table that will be part of the snapshotting process. This can for instance be very useful if you work with soft deletes and would like to exclude all logically deleted records from snapshotting.
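
A sketch of how such a row filter could be configured (the option name is the one used in the MySQL connector documentation, and the table and column names are made up for this example): the affected table is listed once and then gets its own SELECT override:

{
  "snapshot.select.statement.overrides": "inventory.customers",
  "snapshot.select.statement.overrides.inventory.customers": "SELECT * FROM customers WHERE is_deleted = 0"
}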

Please see the full change log for more details and the complete list of fixed issues.

What’s next?

The Debezium 0.7.1 release is planned to be out roughly two weeks after Christmas.

It will contain a new SMT that will unwind MongoDB change events into regular structured records consumable by sink connectors.

A big overhaul of GEOMETRY types is in progress. When completed, all GEOMETRY types will be supported by both the MySQL and PostgreSQL connectors, and they will be available in the standard WKB format for easy consumption by polyglot clients.

There is ongoing work for the MySQL connector to allow dynamic updates of the table.whitelist option. This will allow users to re-configure the set of captured tables without the need to re-create the connector.

If you’d like to contribute, please let us know. We’re happy about any help and will work with you to get you started quickly. Check out the details below on how to get in touch.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


Debezium 0.7.1 Is Released

Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues found during the first rounds of use of Debezium 0.7 by our community. All of the issues relate either to the newly added wal2json support or to the recent work on reducing the risk of an internal race condition.

Robert Coup found a performance regression that occurs when 0.7.0 is used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we have introduced a distinct plug-in name, wal2json_rds, which bypasses the detection routine and always assumes it runs against an Amazon RDS instance. This mode should be used only with RDS instances.
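
So when pointing the connector at an RDS instance, the decoder is selected explicitly rather than auto-detected; conceptually this comes down to a single property in the connector configuration:

{
  "plugin.name": "wal2json_rds"
}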

We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


    "email" : "john.doe@example.com"
  }
}
...

Summary

We set up a complex streaming data pipeline to synchronize a MySQL database with another database and also with an Elasticsearch instance. We managed to keep the same identifier across all systems, which allows us to correlate records across the system as a whole.

Propagating data changes from a primary database in near realtime to a search engine such as Elasticsearch enables many interesting use cases. Besides various applications of full-text search, one could for instance create dashboards and all kinds of visualizations using Kibana to gain further insight into the data.

If you’d like to try out this set-up yourself, just clone the project from our examples repo. In case you need help, have feature requests or would like to share your experiences with this pipeline, please let us know in the comments below.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


Debezium 0.7.2 Is Released

It’s my pleasure to announce the release of Debezium 0.7.2!

Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

MySQL Connector

The biggest change of the MySQL connector is support for geo-spatial column types such as GEOMETRY, POLYGON, MULTIPOINT etc.

There are two new logical field types — io.debezium.data.geometry.Geometry and io.debezium.data.geometry.Geography — for representing geo-spatial columns in change data messages. These types represent geo-spatial data via WKB ("well-known binary") and SRID (coordinate reference system identifier), allowing downstream consumers to interpret the change events using any existing library with support for parsing WKB. A blog post with more details on this will follow soon.

The new snapshotting mode schema_only_recovery comes in handy when for some reason you lost (parts of) the DB history topic used by the MySQL connector. It’s also useful if you’d like to compact that topic by re-creating it. Please refer to the connector documentation for the details of this mode, esp. when it’s safe (and when not) to make use of it.
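
Conceptually, recovering from a lost DB history topic then comes down to temporarily switching the snapshot mode of the MySQL connector, roughly like this (a sketch only; please follow the documented procedure for when this is safe):

{
  "snapshot.mode": "schema_only_recovery"
}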

Another new feature related to managing the size of the DB history topic is the option to control whether to include all DDL events or only those pertaining to tables captured as per the whitelist/blacklist configuration. Again, check out the connector docs to learn more about the specifics of that setting.

Finally, we fixed a few shortcomings of the MySQL DDL parser (DBZ-524, DBZ-530).

PostgreSQL Connector

Similar to the MySQL connector, there’s largely improved support for geo-spatial columns in Postgres now. More specifically, PostGIS column types can now be represented in change data events. Thanks a lot to Robert Coup, who contributed this feature!

Also the support for Postgres array columns has been expanded, e.g. we now support tracking changes to VARCHAR and DATE array columns. Note that the connector doesn’t yet work with geo-spatial array columns (should you ever have those), but this should be added soon, too.

If you’d like to include just a subset of the rows of a captured table in snapshots, you may like the ability to specify dedicated SELECT statements to do so. For instance this can be used to exclude any logically deleted records — which you can recognize based on some flag in that table — from the snapshot.

A few bugs in this connector were reported and fixed by community members, too; e.g. the connector can be correctly paused now (thanks, Andrey Pustovetov), and we fixed an issue which could potentially have committed an incorrect offset to Kafka Connect (thanks, Thon Mekathikom).

MongoDB Connector

If you’ve ever compared the structures of change events emitted by the Debezium RDBMS connectors (MySQL, Postgres) and the MongoDB connector, you’ll know that the message structure of the latter is a bit different than the others. Due to the schemaless nature of MongoDB, the change events essentially contain a String with a JSON representation of the applied insert or patch. This structure cannot be consumed by existing sink connectors, such as the Confluent connectors for JDBC or Elasticsearch.

This now becomes possible by means of a newly added single message transformation (SMT), which parses these JSON strings and creates a structured Kafka Connect record from them (thanks, Sairam Polavarapu!). By applying this SMT together with the JDBC sink connector, you can now stream data changes from MongoDB to any supported relational database.
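
As a sketch of how this could be wired up on the sink side (the transform alias is arbitrary, and the class name reflects the SMT as shipped in this release), the transformation is added to the sink connector configuration like any other SMT:

{
  "transforms": "unwrap",
  "transforms.unwrap.type": "io.debezium.connector.mongodb.transforms.UnwrapFromMongoDbEnvelope"
}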

Note that this SMT is work in progress, and details of its emitted message structure may still change. There are also some inherent limitations to what can be achieved with it: if you e.g. have arrays in your MongoDB documents, the record created by this SMT will be structured accordingly, but many sink connectors cannot process such a structure.

We have some ideas for further development here, e.g. there could be an option for flattening out (non-array) nested structures, so that e.g. { "address" : { "street" : "..." } } would be represented as address_street, which then could be consumed by sink connectors expecting a flat structure.

The new SMT is described in detail in our docs.

What’s next?

Please see the full change log for more details and the complete list of issues fixed in Debezium 0.7.2.

The 0.7.3 release is scheduled for February 14th.

We’ll focus on some more bug fixes, and we’re also working on having Debezium regularly emit heartbeat messages to a dedicated topic. This will be practical for diagnostic purposes, but it will also help to regularly trigger commits of the offset in Kafka Connect. That’s beneficial in certain situations when capturing tables which change only very infrequently.

We’ve also worked out a roadmap describing our ideas for future work on Debezium, going beyond the next bugfix releases. While nothing is cast in stone, this is our idea of the features to add in the coming months. If you miss anything important on this roadmap, please tell us either in the comments below or send a message to our Google group. Looking forward to your feedback!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


Debezium 0.7.3 Is Released

I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

Let’s take a closer look at some of the new features.

All Connectors

Using the new connector option tombstones.on.delete you can now control whether or not a tombstone event should be emitted upon record deletions (DBZ-582). Emitting tombstones is usually the right thing to do and thus remains the default behaviour. But disabling them may be desirable in certain situations, and this gets a bit easier now using that option (before, you’d have to use an SMT, i.e. a single message transform, which for instance isn’t supported when using Debezium’s embedded mode). This feature was contributed by our community member Raf Liwoch. Thanks!
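
Disabling tombstones is then a single property in the connector configuration, for instance:

{
  "tombstones.on.delete": "false"
}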

We’ve also spent some time on a few operational aspects: the sourceInfo element of Debezium’s change data messages contains a new field representing the version of the connector that created the message (DBZ-593). This lets message consumers take specific action based on the version. For instance, this can be helpful when a new Debezium release fixes a bug that consumers had to work around so far: after updating to the new Debezium version, that workaround should not be applied anymore, and the version field allows consumers to decide whether to apply it or not.

The names of all the threads managed by Debezium are now structured in the form of "debezium-<connector>-…​" (DBZ-587). This helps with identifying Debezium’s threads when analyzing thread dumps for instance.

Postgres Connector

Here we’ve focused on improving the support for array types: besides fixing a bug related to numeric arrays (DBZ-577) we’ve also completed the support for the PostGIS types (which was introduced in 0.7.2), allowing you to capture array columns of types GEOMETRY and GEOGRAPHY.

Snapshots are now correctly interruptable (DBZ-586) and the connector will correctly handle the case where after a restart it should continue from a WAL position which isn’t available any more: it’ll stop, requiring you to do a new snapshot (DBZ-590).

MySQL Connector

The MySQL connector can create the DB history topic automatically, if needed (DBZ-278). This means you don’t have to create that topic yourself and you also don’t need to rely on Kafka’s automatic topic creation any longer (any change data topics will automatically be created by Kafka Connect).

Also, the connector can optionally emit messages to a dedicated heartbeat topic at a configurable interval (DBZ-220). This comes in handy in situations where you only want to capture tables with low traffic, while other tables in the database are changed more frequently. In that case, no messages would be emitted to Kafka Connect for a long time, and thus no offset would be committed either. This could cause trouble when restarting the connector: it would want to resume from the last committed offset, which may not be available in the binlogs any longer. But as the captured tables didn’t change, it actually wouldn’t be necessary to resume from such an old binlog position. This is all avoided by emitting messages to the heartbeat topic regularly, which causes the last offset the connector has seen to be committed.
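
As a sketch, heartbeats are enabled by configuring the emission interval; the property name below is the one used in the connector documentation and the value is only an example:

{
  "heartbeat.interval.ms": "10000"
}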

We’ll roll out this change to the other connectors, too, in future releases.

What’s next?

Please see the full change log for more details and the complete list of issues fixed in Debezium 0.7.3.

The next release is scheduled for March 7th. We’ll still have to decide whether that will be 0.7.4 or 0.8.0, depending on how far we are by then with our work on the Oracle connector (DBZ-137).

Please also see our roadmap describing our ideas for the future development of Debezium. This is our current thinking of the things we’d like to tackle in the coming months, but it’s not cast in stone, so please let us know about your feature requests by sending a message to our Google group. We’re looking forward to your feedback!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


Debezium 0.7.4 Is Released

It’s my pleasure to announce the release of Debezium 0.7.4!

Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

New features

In terms of new features, there’s a new mode for handling decimal columns in Postgres and MySQL (DBZ-611). By setting the decimal.handling.mode connector option to string, Debezium will emit decimal and numeric columns as Strings. That is oftentimes easier for consumers to handle than the byte-array based representation used by default, while keeping the full precision. As a bonus, string also makes it possible to convey the special numeric values NaN and Infinity as supported by Postgres. Note that this functionality required an update to Debezium’s logical decoding plug-in which runs within the Postgres database server. This plug-in must be upgraded to the new version before upgrading the Debezium Postgres connector.

Speaking of byte arrays, the BYTEA column type in Postgres is now also supported (DBZ-605).

For the MySQL connector, there’s a new option for the snapshotting routine: snapshot.locking.mode (DBZ-602). Setting this to NONE allows you to skip any table locks during snapshotting. This should be used if and only if you’re absolutely sure that the tables don’t undergo structural changes (columns added, removed etc.) while the snapshot is taken. If that’s guaranteed, though, the new mode can be a useful tool for increasing overall system performance, as writes by concurrent processes won’t be blocked. That’s especially useful in environments such as Amazon RDS, where the connector would otherwise be required to keep a lock for the entirety of the snapshot. The new option supersedes the existing snapshot.minimal.locks option. Please see the connector documentation for the details. This feature was contributed by our community member Stephen Powis; many thanks to you!
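
As a rough sketch of how the two options discussed above are wired into a connector configuration (again with tutorial-style placeholder connection settings; the lowercase value none for snapshot.locking.mode is an assumption based on current documentation):

# Sketch: MySQL connector registration emitting decimals as strings and skipping
# table locks during the snapshot; connection settings are placeholders.
curl -i -X POST -H "Content-Type: application/json" http://localhost:8083/connectors/ -d '{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz",
    "database.server.id": "184054",
    "database.server.name": "dbserver1",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "decimal.handling.mode": "string",
    "snapshot.locking.mode": "none"
  }
}'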

Bug Fixes

0.7.4 brings multiple fixes related to how numeric columns are handled. E.g. columns without scale couldn’t correctly be processed by the MySQL connector during binlog reading (DBZ-615). That’s fixed now. And when using the Postgres connector, arbitrary precision column values are correctly converted into change data message fields now (DBZ-351).

We also noticed a regression introduced in Debezium 0.6: the field schema for NUMERIC columns was always marked as optional, even if that column was actually declared as NOT NULL. The same issue affected geo-spatial array types on Postgres, supported as of Debezium 0.7. This has been fixed with DBZ-635. We don’t expect any impact on consumers from this change (just as before, they’ll always get a value for such a field, only its schema won’t be incorrectly marked as optional any more).

Please see the full change log for more details and the complete list of issues fixed in Debezium 0.7.4.

What’s next?

Following our three-week release cadence, the next Debezium release is planned for March 28th. We have some exciting changes in the works for that: if things go as planned, we’ll release the first version of our Oracle connector (DBZ-20). This will be based on the Oracle XStream API in the first iteration and will not support snapshots yet. But we felt it’d make sense to roll out this connector incrementally, so as to get the new feature out early on and collect feedback on it. We’ve also planned to explore alternatives to using the XStream API in future releases.

Another great new feature will be Reactive Streams support (DBZ-566). Based on top of the existing embedded mode, this will make it very easy to consume change data events using Reactive Streams implementations such as RxJava 2, the Java 9 Flow API and many more. It’ll also be very useful to consume change events in reactive frameworks such as Vert.x. We’re really looking forward to shipping this feature and already have a pending pull request for it. If you like, take a look and let us know about your feedback!

Please also check out our roadmap for the coming months of Debezium’s development. This is our current plan for the things we’ll work on, but it’s not cast in stone, so please tell us about your feature requests by sending a message to our Google group. We’re looking forward to your feedback!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.7.4 Is Released

It’s my pleasure to announce the release of Debezium 0.7.4!

Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

New features

In terms of new features, there’s a new mode for handling decimal columns in Postgres and MySQL (DBZ-611). By setting the decimal.handling.mode connector option to string, Debezium will emit decimal and numeric columns as Strings. That is oftentimes easier for consumers to handle than the byte-array based representation used by default, while keeping the full precision. As a bonus, string also makes it possible to convey the special numeric values NaN and Infinity as supported by Postgres. Note that this functionality required an update to Debezium’s logical decoding plug-in which runs within the Postgres database server. This plug-in must be upgraded to the new version before upgrading the Debezium Postgres connector.

Speaking of byte arrays, the BYTEA column type in Postgres is now also supported (DBZ-605).

For the MySQL connector, there’s a new option for the snapshotting routine: snapshot.locking.mode (DBZ-602). Setting this to NONE allows you to skip any table locks during snapshotting. This should be used if and only if you’re absolutely sure that the tables don’t undergo structural changes (columns added, removed etc.) while the snapshot is taken. If that’s guaranteed, though, the new mode can be a useful tool for increasing overall system performance, as writes by concurrent processes won’t be blocked. That’s especially useful in environments such as Amazon RDS, where the connector would otherwise be required to keep a lock for the entirety of the snapshot. The new option supersedes the existing snapshot.minimal.locks option. Please see the connector documentation for the details. This feature was contributed by our community member Stephen Powis; many thanks to you!

Bug Fixes

0.7.4 brings multiple fixes related to how numeric columns are handled. E.g. columns without scale couldn’t correctly be processed by the MySQL connector during binlog reading (DBZ-615). That’s fixed now. And when using the Postgres connector, arbitrary precision column values are correctly converted into change data message fields now (DBZ-351).

We also noticed a regression introduced in Debezium 0.6: the field schema for NUMERIC columns was always marked as optional, even if that column was actually declared as NOT NULL. The same issue affected geo-spatial array types on Postgres, supported as of Debezium 0.7. This has been fixed with DBZ-635. We don’t expect any impact on consumers from this change (just as before, they’ll always get a value for such a field, only its schema won’t be incorrectly marked as optional any more).

Please see the full change log for more details and the complete list of issues fixed in Debezium 0.7.4.

What’s next?

Following our three-week release cadence, the next Debezium release is planned for March 28th. We have some exciting changes in the works for that: if things go as planned, we’ll release the first version of our Oracle connector (DBZ-20). This will be based on the Oracle XStream API in the first iteration and will not support snapshots yet. But we felt it’d make sense to roll out this connector incrementally, so as to get the new feature out early on and collect feedback on it. We’ve also planned to explore alternatives to using the XStream API in future releases.

Another great new feature will be Reactive Streams support (DBZ-566). Based on top of the existing embedded mode, this will make it very easy to consume change data events using Reactive Streams implementations such as RxJava 2, the Java 9 Flow API and many more. It’ll also be very useful to consume change events in reactive frameworks such as Vert.x. We’re really looking forward to shipping this feature and already have a pending pull request for it. If you like, take a look and let us know about your feedback!

Please also check out our roadmap for the coming months of Debezium’s development. This is our current plan for the things we’ll work on, but it’s not cast in stone, so please tell us about your feature requests by sending a message to our Google group. We’re looking forward to your feedback!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/03/08/creating-ddd-aggregates-with-debezium-and-kafka-streams/index.html b/blog/2018/03/08/creating-ddd-aggregates-with-debezium-and-kafka-streams/index.html index e5ebffa6bb..c61fb07acd 100644 --- a/blog/2018/03/08/creating-ddd-aggregates-with-debezium-and-kafka-streams/index.html +++ b/blog/2018/03/08/creating-ddd-aggregates-with-debezium-and-kafka-streams/index.html @@ -126,4 +126,4 @@ } }

Due to the combination of the data in a single document, some parts aren’t needed or are redundant. To get rid of any unwanted data (e.g. _eventType or the customer_id of each address sub-document), it would also be possible to adapt the configuration in order to blacklist said fields.

Finally, you update some customer or address data in the MySQL source database:

docker-compose exec mysql bash -c 'mysql -u $MYSQL_USER -p$MYSQL_PASSWORD inventory'
 
-mysql> update customers set first_name= "Sarah" where id = 1001;

Shortly thereafter, you should see that the corresponding aggregate document in MongoDB has been updated accordingly.
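
As a quick, purely illustrative way of verifying that, you could query MongoDB directly. Note that the service, database and collection names below are assumptions for the sake of the example and are not taken from this post:

# Hypothetical check of the aggregate document for customer 1001; "mongodb",
# "inventory" and "customers_with_addresses" are assumed names.
docker-compose exec mongodb bash -c 'mongo inventory --eval "db.customers_with_addresses.find({ _id : 1001 }).pretty()"'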

Drawbacks and Limitations

While this first version for creating DDD aggregates from table-based CDC events basically works, it is very important to understand its current limitations:

  • not generically applicable thus needs custom code for POJOs and intermediate types

  • cannot be scaled across multiple instances as is due to missing but necessary data repartitioning prior to processing

  • limited to building aggregates based on a single JOIN between 1:N relationships

  • resulting DDD aggregates are eventually consistent, meaning that it is possible for them to temporarily exhibit intermediate state before converging

The first few can be addressed with a reasonable amount of work on the KStreams application. The last one, dealing with the eventually consistent nature of the resulting DDD aggregates, is much harder to correct and will require some efforts within Debezium’s own CDC mechanism.

Outlook

In this post we described an approach for creating aggregated events from Debezium’s CDC events. In a follow-up blog post we may dive a bit more into the topic of how to horizontally scale the DDD aggregate creation by running multiple KStreams aggregator instances. For that purpose, the data needs proper re-partitioning before running the topology. In addition, it could be interesting to look into a somewhat more generic version which only needs custom classes to describe the two main POJOs involved.

We also thought about providing a ready-to-use component which would work in a generic way (based on Connect records, i.e. not tied to a specific serialization format such as JSON) and could be set up as a configurable stand-alone process running given aggregations.

Also on the topic of dealing with eventual consistency we got some ideas, but those will need some more exploration and investigation for sure. Stay tuned!

We’d love to hear your feedback on the topic of event aggregation. If you have any ideas or thoughts on the subject, please get in touch by posting a comment below or sending a message to our mailing list.

Hans-Peter Grahsl

Hans-Peter is a technical trainer at NETCONOMY as well as an individual consultant for Java web development and modern data architectures. Besides, he is working as an associate lecturer for software engineering. He lives in Graz, Austria.

     

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +mysql> update customers set first_name= "Sarah" where id = 1001;

Shortly thereafter, you should see that the corresponding aggregate document in MongoDB has been updated accordingly.

Drawbacks and Limitations

While this first version for creating DDD aggregates from table-based CDC events basically works, it is very important to understand its current limitations:

  • not generically applicable thus needs custom code for POJOs and intermediate types

  • cannot be scaled across multiple instances as is due to missing but necessary data repartitioning prior to processing

  • limited to building aggregates based on a single JOIN between 1:N relationships

  • resulting DDD aggregates are eventually consistent, meaning that it is possible for them to temporarily exhibit intermediate state before converging

The first few can be addressed with a reasonable amount of work on the KStreams application. The last one, dealing with the eventually consistent nature of the resulting DDD aggregates, is much harder to correct and will require some efforts within Debezium’s own CDC mechanism.

Outlook

In this post we described an approach for creating aggregated events from Debezium’s CDC events. In a follow-up blog post we may dive a bit more into the topic of how to horizontally scale the DDD aggregate creation by running multiple KStreams aggregator instances. For that purpose, the data needs proper re-partitioning before running the topology. In addition, it could be interesting to look into a somewhat more generic version which only needs custom classes to describe the two main POJOs involved.

We also thought about providing a ready-to-use component which would work in a generic way (based on Connect records, i.e. not tied to a specific serialization format such as JSON) and could be set up as a configurable stand-alone process running given aggregations.

Also on the topic of dealing with eventual consistency we got some ideas, but those will need some more exploration and investigation for sure. Stay tuned!

We’d love to hear your feedback on the topic of event aggregation. If you have any ideas or thoughts on the subject, please get in touch by posting a comment below or sending a message to our mailing list.

Hans-Peter Grahsl

Hans-Peter is a technical trainer at NETCONOMY as well as an individual consultant for Java web development and modern data architectures. Besides, he is working as an associate lecturer for software engineering. He lives in Graz, Austria.

     

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/03/16/note-on-database-history-topic-configuration/index.html b/blog/2018/03/16/note-on-database-history-topic-configuration/index.html index adb17ca29d..c2c968e291 100644 --- a/blog/2018/03/16/note-on-database-history-topic-configuration/index.html +++ b/blog/2018/03/16/note-on-database-history-topic-configuration/index.html @@ -3,4 +3,4 @@ --entity-type topics \ --entity-name <DB_HISTORY_TOPIC> \ --alter \ - --add-config retention.bytes=-1

In case parts of the history topic have already been removed, you can use the snapshot mode schema_only_recovery to re-create the history topic, provided no schema changes have happened since the last committed offset of the connector. Alternatively, a complete new snapshot should be taken, e.g. by setting up a new connector instance.
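
For illustration, such a recovery could look roughly like the following sketch, which re-registers the connector configuration with snapshot.mode set to schema_only_recovery; the connector name and all connection settings are placeholders, not taken from the original post:

# Sketch: update the MySQL connector so that it rebuilds the database history topic
# from the current schema on the next start; all settings below are placeholders.
curl -i -X PUT -H "Content-Type: application/json" \
  http://localhost:8083/connectors/inventory-connector/config -d '{
  "connector.class": "io.debezium.connector.mysql.MySqlConnector",
  "database.hostname": "mysql",
  "database.port": "3306",
  "database.user": "debezium",
  "database.password": "dbz",
  "database.server.id": "184054",
  "database.server.name": "dbserver1",
  "database.history.kafka.bootstrap.servers": "kafka:9092",
  "database.history.kafka.topic": "schema-changes.inventory",
  "snapshot.mode": "schema_only_recovery"
}'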

Next steps

We’ll release Debezium 0.7.5 with a fix for this issue early next week. Note that previously created database history topics should be re-configured as described above. Please don’t hesitate to get in touch in the comments below, the chat room or the mailing list in case you have any further questions on this issue.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + --add-config retention.bytes=-1

In case parts of the history topic have already been removed, you can use the snapshot mode schema_only_recovery to re-create the history topic, provided no schema changes have happened since the last committed offset of the connector. Alternatively, a complete new snapshot should be taken, e.g. by setting up a new connector instance.

Next steps

We’ll release Debezium 0.7.5 with a fix for this issue early next week. Note that previously created database history topics should be re-configured as described above. Please don’t hesitate to get in touch in the comments below, the chat room or the mailing list in case you have any further questions on this issue.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/03/20/debezium-0-7-5-released/index.html b/blog/2018/03/20/debezium-0-7-5-released/index.html index 60757e16fd..24ffb60f2f 100644 --- a/blog/2018/03/20/debezium-0-7-5-released/index.html +++ b/blog/2018/03/20/debezium-0-7-5-released/index.html @@ -1 +1 @@ - Debezium 0.7.5 Is Released

It’s my pleasure to announce the release of Debezium 0.7.5!

This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

And we got some more bugfixes by our fantastic community: Long-term community member Peter Goransson fixed an issue about the snapshot JMX metrics of the MySQL connector, which are now also accessible after the snapshot has been completed (DBZ-640). Andrew Tongen spotted and fixed an issue for the Debezium embedded engine (DBZ-665) which caused offsets to be committed more often than needed. And Matthias Wessendorf upgraded the Debezium dependencies and Docker images to Apache Kafka 1.0.1 (DBZ-647).

Thank you all for your help!

Please refer to the change log for the complete list of changes in Debezium 0.7.5.

What’s next?

Please see the previous release announcement for the next planned features. Due to the unplanned 0.7.5 release, though, the schedule of the next one will likely be extended a little bit.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.7.5 Is Released

It’s my pleasure to announce the release of Debezium 0.7.5!

This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

And we got some more bugfixes by our fantastic community: Long-term community member Peter Goransson fixed an issue about the snapshot JMX metrics of the MySQL connector, which are now also accessible after the snapshot has been completed (DBZ-640). Andrew Tongen spotted and fixed an issue for the Debezium embedded engine (DBZ-665) which caused offsets to be committed more often than needed. And Matthias Wessendorf upgraded the Debezium dependencies and Docker images to Apache Kafka 1.0.1 (DBZ-647).

Thank you all for your help!

Please refer to the change log for the complete list of changes in Debezium 0.7.5.

What’s next?

Please see the previous release announcement for the next planned features. Due to the unplanned 0.7.5 release, though, the schedule of the next one will likely be extended a little bit.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/05/24/querying-debezium-change-data-eEvents-with-ksql/index.html b/blog/2018/05/24/querying-debezium-change-data-eEvents-with-ksql/index.html index e6f4da01a4..8a04dfeace 100644 --- a/blog/2018/05/24/querying-debezium-change-data-eEvents-with-ksql/index.html +++ b/blog/2018/05/24/querying-debezium-change-data-eEvents-with-ksql/index.html @@ -102,4 +102,4 @@ 10003 | 2 | George | Bailey 10004 | 1 | Edward | Walker 10005 | 5 | Edward | Walker -10004 | 20 | Edward | Walker

Summary

We have successfully started a KSQL instance. We have mapped KSQL streams to the topics filled by Debezium and made a join between them. We have also discussed the problem of repartitioning in streaming applications.

If you’d like to try out this example with Avro encoding and schema registry then you can use our Avro example. Also for further details and more advanced usages just refer to the KSQL syntax reference.

In case you need help, have feature requests or would like to share your experiences with this example, please let us know in the comments below.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +10004 | 20 | Edward | Walker

Summary

We have successfully started a KSQL instance. We have mapped KSQL streams to the topics filled by Debezium and made a join between them. We have also discussed the problem of repartitioning in streaming applications.

If you’d like to try out this example with Avro encoding and schema registry then you can use our Avro example. Also for further details and more advanced usages just refer to the KSQL syntax reference.

In case you need help, have feature requests or would like to share your experiences with this example, please let us know in the comments below.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/06/21/debezium-0-8-0-beta1-released/index.html b/blog/2018/06/21/debezium-0-8-0-beta1-released/index.html index 91d05d16c5..7e42fb9225 100644 --- a/blog/2018/06/21/debezium-0-8-0-beta1-released/index.html +++ b/blog/2018/06/21/debezium-0-8-0-beta1-released/index.html @@ -1 +1 @@ - Debezium 0.8.0.Beta1 Is Released

It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

This release would not have been possible without our outstanding community; a huge "thank you" goes out to the following open source enthusiasts who all contributed to the new version: Echo Xu, Ivan Vucina, Listman Gamboa, Omar Al-Safi, Peter Goransson, Roman Kuchar (who did a tremendous job with the new DDL parser implementation!), Sagar Rao, Saulius Valatka, Sairam Polavarapu, Stephen Powis and WenZe Hu.

Thank you all very much for your help!

Now let’s take a closer look at some of the new features in Debezium 0.8.0.Beta1; as always, you can find the complete list of changes of this release in the change log. Please take a special look at the breaking changes and the upgrade notes.

XStream-based Oracle Connector (Tech Preview)

Support for a Debezium Oracle connector has been one of the most asked for features for a long time (its original issue number is DBZ-20!). So we are very happy that we eventually can release a first work-in-progress version of that connector. At this point this code is still very much evolving, so it should be considered as a first tech preview. This means it’s not feature complete (most notably, there’s no support for initial snapshots yet), the emitted message format may still change etc. So while we don’t recommend using it in production quite yet, you should definitely give it a try and report back about your experiences.

One challenge for the Oracle connector is how to get the actual change events out of the database. Unlike with MySQL and Postgres, there’s unfortunately no free-to-use and easy-to-work-with API which would allow doing the same for Oracle. After some exploration we decided to base this first version of the connector on the Oracle XStream API. While this (kinda) checks the box for "easy-to-work-with", it doesn’t do so for "free-to-use": using this API requires you to have a license for Oracle’s separate GoldenGate product. We’re fully aware that this is not ideal, but we decided to still go this route as a first step, allowing us to gain some experience with Oracle and also get a connector into the hands of those with the required license handy. Going forward, we are going to explore alternative approaches. We already have some ideas and discussions around this, so please stay tuned (the issue to track is DBZ-137).

The Oracle connector is going to evolve within the next 0.8.x releases. To learn more about it, please check its connector documentation page.

Antlr-based MySQL DDL Parser

In order to build up an internal meta-model of the captured database’s structure, the Debezium MySQL connector needs to parse all issued DDL statements (CREATE TABLE etc.). This used to be done with a hand-written DDL parser which worked reasonably well, but over time it also revealed some shortcomings; as the DDL language is quite extensive, we repeatedly saw bug reports caused by some specific DDL constructs not being parseable.

So we decided to go back to the drawing board and came up with a brand new parser design. Thanks to the great work of Roman Kuchar, we now have a completely new DDL parser which is based on the proven and very mature Antlr parser generator (luckily, the Antlr project provides a complete MySQL grammar). So we should see far fewer issue reports related to DDL parsing going forward.

For the time being, the old parser is still in place and remains the default parser for Debezium 0.8.x. You are strongly encouraged, though, to test the new implementation by setting the connector option ddl.parser.mode to antlr and to report back if you run into any issues doing so. We plan to improve and polish the Antlr parser during the 0.8.x release line (specifically, we’re going to measure its performance and optimize as needed) and switch to it by default as of Debezium 0.9. Eventually, the old parser will be removed in a future release after that.

Further MySQL Connector Changes

The MySQL Connector propagates column default values to corresponding Kafka Connect schemas now (DBZ-191). That’s beneficial when using Avro as serialization format and the schema registry with compatibility checking enabled.

By setting the include.query connector option to true, you can add the original query that caused a data change to the corresponding CDC events (DBZ-706). While disabled by default, this feature can be a useful tool for analyzing and interpreting data changes captured with Debezium.

Some other changes in the MySQL connector include configurability of the heartbeat topic name (DBZ-668), fixes around timezone handling for TIMESTAMP (DBZ-578) and DATETIME columns (DBZ-741), and correct handling of NUMERIC columns without an explicit scale value (DBZ-727).
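
Pulling the MySQL connector options mentioned above together, a registration request could look roughly like the following sketch; the connection settings are placeholders, and note that include.query additionally requires the binlog_rows_query_log_events option to be enabled on the MySQL server:

# Sketch: MySQL connector using the new Antlr DDL parser and embedding the original
# query into change events; all connection settings below are placeholders.
curl -i -X POST -H "Content-Type: application/json" http://localhost:8083/connectors/ -d '{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz",
    "database.server.id": "184054",
    "database.server.name": "dbserver1",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "ddl.parser.mode": "antlr",
    "include.query": "true"
  }
}'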

Postgres Connector

The Debezium Connector for Postgres has seen quite a number of bugfixes, including the following ones:

  • wal2json can handle transactions now that are bigger than 1Gb (DBZ-638)

  • the transaction ID is consistently handled as long now (DBZ-673)

  • multiple fixes related to temporal column types (DBZ-681, DBZ-696)

  • OIDs are handled correctly as unsigned int now (DBZ-697, DBZ-701)

MongoDB Connector

Also for the MongoDB Connector, a number of small feature implementations and bugfixes have been done:

  • Tested against MongoDB 3.6 (DBZ-529)

  • Nested documents can be flattened using a provided SMT now (DBZ-561), which is useful when sinking changes from MongoDB into a relational database

  • The unwrapping SMT can be used together with Avro now (DBZ-650)

  • The unwrapping SMT can handle arrays with mixed element types (DBZ-649)

  • When interrupted during snapshotting before completion, the connector will redo the snapshot after restarting (DBZ-712)

What’s next?

As per the new Beta/CR/Final release scheme, we hope to get some feedback by the community (i.e. you :) on this Beta release. Depending on the number of issues reported, we’ll either release another Beta or go to CR1 with the next version. The 0.8.0.Final version will be released within a few weeks. Note that the Oracle connector will remain a "tech preview" component also in the final version.

After that, we’ve planned to do a few 0.8.x releases with bug fixes mostly, while work on Debezium 0.9 will commence in parallel. For that we’ve planned to work on a connector for SQL Server (see DBZ-40). We’d also like to explore means of creating consistent materializations of joins from multiple tables' CDC streams, based on the ids of originating transactions. Also there’s the idea and a first prototype of exposing Debezium change events as a reactive event stream (DBZ-566), which might be shipped eventually.

Please take a look at the roadmap for some more long term ideas and get in touch with us, if you got thoughts around that.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.8.0.Beta1 Is Released

It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

This release would not have been possible without our outstanding community; a huge "thank you" goes out to the following open source enthusiasts who all contributed to the new version: Echo Xu, Ivan Vucina, Listman Gamboa, Omar Al-Safi, Peter Goransson, Roman Kuchar (who did a tremendous job with the new DDL parser implementation!), Sagar Rao, Saulius Valatka, Sairam Polavarapu, Stephen Powis and WenZe Hu.

Thank you all very much for your help!

Now let’s take a closer look at some of the new features in Debezium 0.8.0.Beta1; as always, you can find the complete list of changes of this release in the change log. Please take a special look at the breaking changes and the upgrade notes.

XStream-based Oracle Connector (Tech Preview)

Support for a Debezium Oracle connector has been one of the most asked for features for a long time (its original issue number is DBZ-20!). So we are very happy that we eventually can release a first work-in-progress version of that connector. At this point this code is still very much evolving, so it should be considered as a first tech preview. This means it’s not feature complete (most notably, there’s no support for initial snapshots yet), the emitted message format may still change etc. So while we don’t recommend using it in production quite yet, you should definitely give it a try and report back about your experiences.

One challenge for the Oracle connector is how to get the actual change events out of the database. Unlike with MySQL and Postgres, there’s unfortunately no free-to-use and easy-to-work-with API which would allow doing the same for Oracle. After some exploration we decided to base this first version of the connector on the Oracle XStream API. While this (kinda) checks the box for "easy-to-work-with", it doesn’t do so for "free-to-use": using this API requires you to have a license for Oracle’s separate GoldenGate product. We’re fully aware that this is not ideal, but we decided to still go this route as a first step, allowing us to gain some experience with Oracle and also get a connector into the hands of those with the required license handy. Going forward, we are going to explore alternative approaches. We already have some ideas and discussions around this, so please stay tuned (the issue to track is DBZ-137).

The Oracle connector is going to evolve within the next 0.8.x releases. To learn more about it, please check its connector documentation page.

Antlr-based MySQL DDL Parser

In order to build up an internal meta-model of the captured database’s structure, the Debezium MySQL connector needs to parse all issued DDL statements (CREATE TABLE etc.). This used to be done with a hand-written DDL parser which worked reasonably well, but over time it also revealed some shortcomings; as the DDL language is quite extensive, we repeatedly saw bug reports caused by some specific DDL constructs not being parseable.

So we decided to go back to the drawing board and came up with a brand new parser design. Thanks to the great work of Roman Kuchar, we now have a completely new DDL parser which is based on the proven and very mature Antlr parser generator (luckily, the Antlr project provides a complete MySQL grammar). So we should see far fewer issue reports related to DDL parsing going forward.

For the time being, the old parser is still in place and remains the default parser for Debezium 0.8.x. You are strongly encouraged, though, to test the new implementation by setting the connector option ddl.parser.mode to antlr and to report back if you run into any issues doing so. We plan to improve and polish the Antlr parser during the 0.8.x release line (specifically, we’re going to measure its performance and optimize as needed) and switch to it by default as of Debezium 0.9. Eventually, the old parser will be removed in a future release after that.

Further MySQL Connector Changes

The MySQL Connector propagates column default values to corresponding Kafka Connect schemas now (DBZ-191). That’s beneficial when using Avro as serialization format and the schema registry with compatibility checking enabled.

By setting the include.query connector option to true, you can add the original query that caused a data change to the corresponding CDC events (DBZ-706). While disabled by default, this feature can be a useful tool for analyzing and interpreting data changes captured with Debezium.

Some other changes in the MySQL connector include configurability of the heartbeat topic name (DBZ-668), fixes around timezone handling for TIMESTAMP (DBZ-578) and DATETIME columns (DBZ-741), and correct handling of NUMERIC columns without an explicit scale value (DBZ-727).

Postgres Connector

The Debezium Connector for Postgres has seen quite a number of bugfixes, including the following ones:

  • wal2json can handle transactions now that are bigger than 1Gb (DBZ-638)

  • the transaction ID is consistently handled as long now (DBZ-673)

  • multiple fixes related to temporal column types (DBZ-681, DBZ-696)

  • OIDs are handled correctly as unsigned int now (DBZ-697, DBZ-701)

MongoDB Connector

Also for the MongoDB Connector, a number of small feature implementations and bugfixes have been done:

  • Tested against MongoDB 3.6 (DBZ-529)

  • Nested documents can be flattened using a provided SMT now (DBZ-561), which is useful when sinking changes from MongoDB into a relational database

  • The unwrapping SMT can be used together with Avro now (DBZ-650)

  • The unwrapping SMT can handle arrays with mixed element types (DBZ-649)

  • When interrupted during snapshotting before completion, the connector will redo the snapshot after restarting (DBZ-712)

What’s next?

As per the new Beta/CR/Final release scheme, we hope to get some feedback by the community (i.e. you :) on this Beta release. Depending on the number of issues reported, we’ll either release another Beta or go to CR1 with the next version. The 0.8.0.Final version will be released within a few weeks. Note that the Oracle connector will remain a "tech preview" component also in the final version.

After that, we’ve planned to do a few 0.8.x releases with bug fixes mostly, while work on Debezium 0.9 will commence in parallel. For that we’ve planned to work on a connector for SQL Server (see DBZ-40). We’d also like to explore means of creating consistent materializations of joins from multiple tables' CDC streams, based on the ids of originating transactions. Also there’s the idea and a first prototype of exposing Debezium change events as a reactive event stream (DBZ-566), which might be shipped eventually.

Please take a look at the roadmap for some more long term ideas and get in touch with us, if you got thoughts around that.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/07/04/debezium-0-8-0-cr1-released/index.html b/blog/2018/07/04/debezium-0-8-0-cr1-released/index.html index 50f82b7f4f..0ec5009c90 100644 --- a/blog/2018/07/04/debezium-0-8-0-cr1-released/index.html +++ b/blog/2018/07/04/debezium-0-8-0-cr1-released/index.html @@ -1 +1 @@ - Debezium 0.8.0.CR1 Is Released

A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

In terms of new features, the CR1 release brings support for CITEXT columns in the Postgres connector (DBZ-762). All the relational connectors now support conveying the original name and length of captured columns via schema parameters in the emitted change messages (DBZ-644). This can come in handy for properly sizing columns in a sink database for types such as VARCHAR.

Thanks a lot to the following community members who contributed to this release: Andreas Bergmeier, Olavi Mustanoja and Orr Ganani.

Please take a look at the change log for the complete list of changes in 0.8.0.CR1 and general upgrade notes.

What’s next?

Barring any unforeseen issues and critical bug reports, we’ll release Debezium 0.8.0.Final next week.

Once that’s out, we’ll continue work on the Oracle connector (e.g. exploring alternatives to using XStream for ingesting changes from the database as well as initial snapshotting), which remains a "tech preview" component as of 0.8.

We’ll also work towards a connector for SQL Server (see DBZ-40); the first steps were made just today by preparing a Docker-based setup with a CDC-enabled SQL Server instance, which will allow us to implement and test the connector going forward.

To find out about some more long-term ideas, please check out our roadmap and get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.8.0.CR1 Is Released

A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported against last week’s Beta release, accompanied by a small number of newly implemented features.

Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We plan to switch to the new implementation as the default in Debezium 0.9.

In terms of new features, the CR1 release brings support for CITEXT columns in the Postgres connector (DBZ-762). In addition, all the relational connectors can now convey the original name and length of captured columns using schema parameters in the emitted change messages (DBZ-644). This can come in handy for properly sizing columns in a sink database, e.g. for types such as VARCHAR.

Thanks a lot to the following community members who contributed to this release: Andreas Bergmeier, Olavi Mustanoja and Orr Ganani.

Please take a look at the change log for the complete list of changes in 0.8.0.CR1 and general upgrade notes.

What’s next?

Barring any unforeseen issues and critical bug reports, we’ll release Debezium 0.8.0.Final next week.

Once that’s out, we’ll continue work on the Oracle connector (e.g. exploring alternatives to using XStream for ingesting changes from the database as well as initial snapshotting), which remains a "tech preview" component as of 0.8.

We’ll also work towards a connector for SQL Server (see DBZ-40); the first steps were made just today by preparing a Docker-based setup with a CDC-enabled SQL Server instance, which will allow us to implement and test the connector going forward.

To find out about some more long-term ideas, please check out our roadmap and get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/07/12/debezium-0-8-0-final-released/index.html b/blog/2018/07/12/debezium-0-8-0-final-released/index.html index 377cc12d0e..445f79bc85 100644 --- a/blog/2018/07/12/debezium-0-8-0-final-released/index.html +++ b/blog/2018/07/12/debezium-0-8-0-final-released/index.html @@ -1 +1 @@ - Debezium 0.8 Final Is Released

I’m very happy to announce the release of Debezium 0.8.0.Final!

The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

What’s next?

We’re continuing our work on the Oracle connector. The work on initial snapshotting is progressing well and should be part of the next release. Other improvements will include support for structural changes to captured tables after the initial snapshot has been made, more extensive source info metadata, and more. Please track DBZ-716 for this work; the improvements are planned to be released incrementally in the upcoming versions of Debezium.

We’ve also started to explore ingesting changes via LogMiner. This is more involved in terms of engineering efforts than using XStream, but it comes with the huge advantage of not requiring a separate license (LogMiner comes with the Oracle database itself). It’s not quite clear yet when we can release something on this front, and we’re also actively exploring further alternatives. But we are quite optimistic and hope to have something some time soon.

The other focus of work is a connector for SQL Server (see DBZ-40). Work on this has started as well, and there should be an Alpha1 release of Debezium 0.9 with a first drop of that connector within the next few weeks.

To find out about some more long-term ideas, please check out our roadmap and get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.8 Final Is Released

I’m very happy to announce the release of Debezium 0.8.0.Final!

The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

What’s next?

We’re continuing our work on the Oracle connector. The work on initial snapshotting is progressing well and should be part of the next release. Other improvements will include support for structural changes to captured tables after the initial snapshot has been made, more extensive source info metadata, and more. Please track DBZ-716 for this work; the improvements are planned to be released incrementally in the upcoming versions of Debezium.

We’ve also started to explore ingesting changes via LogMiner. This is more involved in terms of engineering efforts than using XStream, but it comes with the huge advantage of not requiring a separate license (LogMiner comes with the Oracle database itself). It’s not quite clear yet when we can release something on this front, and we’re also actively exploring further alternatives. But we are quite optimistic and hope to have something some time soon.

The other focus of work is a connector for SQL Server (see DBZ-40). Work on this has started as well, and there should be an Alpha1 release of Debezium 0.9 with a first drop of that connector within the next few weeks.

To find out about some more long-term ideas, please check out our roadmap and get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/07/19/advantages-of-log-based-change-data-capture/index.html b/blog/2018/07/19/advantages-of-log-based-change-data-capture/index.html index 5abbdc56a1..30900f283c 100644 --- a/blog/2018/07/19/advantages-of-log-based-change-data-capture/index.html +++ b/blog/2018/07/19/advantages-of-log-based-change-data-capture/index.html @@ -1 +1 @@ - Five Advantages of Log-Based Change Data Capture

Yesterday I had the opportunity to present Debezium and the idea of change data capture (CDC) to the Darmstadt Java User Group. It was a great evening with lots of interesting discussions and questions. One of the questions was the following: what is the advantage of using a log-based change data capture tool such as Debezium over simply polling for updated records?

So first of all, what’s the difference between the two approaches? With polling-based (or query-based) CDC you repeatedly run queries (e.g. via JDBC) for retrieving any newly inserted or updated rows from the tables to be captured. Log-based CDC in contrast works by reacting to any changes to the database’s log files (e.g. MySQL’s binlog or MongoDB’s op log).
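
To make the comparison a bit more tangible, here’s a rough sketch of the polling approach in Java, using a hypothetical customers table with a LAST_UPDATE_TIMESTAMP column; it illustrates the general pattern rather than any particular tool, and the connection settings are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Timestamp;

public class PollingBasedCdcSketch {

    public static void main(String[] args) throws Exception {
        Timestamp lastSeen = new Timestamp(0L);
        try (Connection connection = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/inventory", "user", "password")) {
            while (true) {
                // Fetch rows changed since the previous poll; deletions and any intermediary
                // updates that happened between two polls are invisible to this query.
                try (PreparedStatement statement = connection.prepareStatement(
                        "SELECT id, first_name, last_name, email, last_update_timestamp "
                        + "FROM customers WHERE last_update_timestamp > ? ORDER BY last_update_timestamp")) {
                    statement.setTimestamp(1, lastSeen);
                    try (ResultSet rows = statement.executeQuery()) {
                        while (rows.next()) {
                            lastSeen = rows.getTimestamp("last_update_timestamp");
                            System.out.println("Changed row, id=" + rows.getLong("id"));
                        }
                    }
                }
                Thread.sleep(10_000); // poll interval: a trade-off between database load and latency
            }
        }
    }
}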

As this wasn’t the first time this question came up, I thought I could provide a more extensive answer also here on the blog. That way I’ll be able to refer to this post in the future, should the question come up again :)

So without further ado, here’s my list of five advantages of log-based CDC over polling-based approaches.

All Data Changes Are Captured

By reading the database’s log, you get the complete list of all data changes in their exact order of application. This is vital for many use cases where you are interested in the complete history of record changes. In contrast, with a polling-based approach you might miss intermediary data changes that happen between two runs of the poll loop. For instance it could happen that a record is inserted and deleted between two polls, in which case this record would never be captured by poll-based CDC.

Related to this is the aspect of downtimes, e.g. when updating the CDC tool. With poll-based CDC, only the latest state of a given record would be captured once the CDC tool is back online, missing any earlier changes to the record that occurred during the downtime. A log-based CDC tool will be able to resume reading the database log from the point where it left off before it was shut down, causing the complete history of data changes to be captured.

Low Delays of Events While Avoiding Increased CPU Load

With polling, you might be tempted to increase the frequency of polling attempts in order to reduce the chances of missing intermediary updates. While this works to some degree, polling too frequently may cause performance issues (as the queries used for polling put load on the source database). On the other hand, expanding the polling interval will reduce the CPU load, but may result not only in missed change events but also in a longer delay for propagating data changes. Log-based CDC allows you to react to data changes in near real-time without paying the price of spending CPU time on running polling queries repeatedly.

No Impact on Data Model

Polling requires some indicator to identify those records that have been changed since the last poll. So all the captured tables need to have some column like LAST_UPDATE_TIMESTAMP which can be used to find changed rows. This can be fine in some cases, but in others such a requirement might not be desirable. Specifically, you’ll need to make sure that the update timestamps are maintained correctly on all captured tables, either by the writing applications or, e.g., through triggers.

Can Capture Deletes

Naturally, polling will not allow you to identify any records that have been deleted since the last poll. Oftentimes that’s a problem for replication-like use cases where you’d like to have an identical data set on the source database and the replication targets, meaning you’d also like to delete records on the sink side if they have been removed in the source database.

Can Capture Old Record State And Further Meta Data

Depending on the source database’s capabilities, log-based CDC can provide the old record state for update and delete events, whereas with polling you’ll only get the current row state. Having the old row state handy in a single change event can be interesting for many use cases, e.g. if you’d like to display the complete data change with old and new column values to an application user for auditing purposes.

In addition, log-based approaches can often provide streams of schema changes (e.g. in the form of applied DDL statements) and expose additional metadata such as transaction ids or the user applying a certain change. While these things may generally be doable with query-based approaches, too (depending on the capabilities of the database), I haven’t really seen it done in practice.

Summary

And that’s it, five advantages of log-based change data capture. Note that this is not to say that polling-based CDC doesn’t have its applications. If for instance your use case can be satisfied by propagating changes once per hour and it’s not a problem to miss intermediary versions of records that were valid in between, it can be perfectly fine.

But if you’re interested in capturing data changes in near real-time and making sure you don’t miss any change events (including deletions), then I’d very much recommend exploring the possibilities of log-based CDC as enabled by Debezium. The Debezium connectors do all the heavy lifting for you, i.e. you don’t have to deal with all the low-level specifics of the individual databases and the means of getting changes from their logs. Instead, you can consume the generic and largely unified change data events produced by Debezium.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Five Advantages of Log-Based Change Data Capture

Yesterday I had the opportunity to present Debezium and the idea of change data capture (CDC) to the Darmstadt Java User Group. It was a great evening with lots of interesting discussions and questions. One of the questions was the following: what is the advantage of using a log-based change data capture tool such as Debezium over simply polling for updated records?

So first of all, what’s the difference between the two approaches? With polling-based (or query-based) CDC you repeatedly run queries (e.g. via JDBC) for retrieving any newly inserted or updated rows from the tables to be captured. Log-based CDC in contrast works by reacting to any changes to the database’s log files (e.g. MySQL’s binlog or MongoDB’s op log).

As this wasn’t the first time this question came up, I thought I could provide a more extensive answer also here on the blog. That way I’ll be able to refer to this post in the future, should the question come up again :)

So without further ado, here’s my list of five advantages of log-based CDC over polling-based approaches.

All Data Changes Are Captured

By reading the database’s log, you get the complete list of all data changes in their exact order of application. This is vital for many use cases where you are interested in the complete history of record changes. In contrast, with a polling-based approach you might miss intermediary data changes that happen between two runs of the poll loop. For instance it could happen that a record is inserted and deleted between two polls, in which case this record would never be captured by poll-based CDC.

Related to this is the aspect of downtimes, e.g. when updating the CDC tool. With poll-based CDC, only the latest state of a given record would be captured once the CDC tool is back online, missing any earlier changes to the record that occurred during the downtime. A log-based CDC tool will be able to resume reading the database log from the point where it left off before it was shut down, causing the complete history of data changes to be captured.

Low Delays of Events While Avoiding Increased CPU Load

With polling, you might be tempted to increase the frequency of polling attempts in order to reduce the chances of missing intermediary updates. While this works to some degree, polling too frequently may cause performance issues (as the queries used for polling put load on the source database). On the other hand, expanding the polling interval will reduce the CPU load, but may result not only in missed change events but also in a longer delay for propagating data changes. Log-based CDC allows you to react to data changes in near real-time without paying the price of spending CPU time on running polling queries repeatedly.

No Impact on Data Model

Polling requires some indicator to identify those records that have been changed since the last poll. So all the captured tables need to have some column like LAST_UPDATE_TIMESTAMP which can be used to find changed rows. This can be fine in some cases, but in others such a requirement might not be desirable. Specifically, you’ll need to make sure that the update timestamps are maintained correctly on all captured tables, either by the writing applications or, e.g., through triggers.

Can Capture Deletes

Naturally, polling will not allow you to identify any records that have been deleted since the last poll. Oftentimes that’s a problem for replication-like use cases where you’d like to have an identical data set on the source database and the replication targets, meaning you’d also like to delete records on the sink side if they have been removed in the source database.

Can Capture Old Record State And Further Meta Data

Depending on the source database’s capabilities, log-based CDC can provide the old record state for update and delete events, whereas with polling you’ll only get the current row state. Having the old row state handy in a single change event can be interesting for many use cases, e.g. if you’d like to display the complete data change with old and new column values to an application user for auditing purposes.

In addition, log-based approaches can often provide streams of schema changes (e.g. in the form of applied DDL statements) and expose additional metadata such as transaction ids or the user applying a certain change. While these things may generally be doable with query-based approaches, too (depending on the capabilities of the database), I haven’t really seen it done in practice.

Summary

And that’s it, five advantages of log-based change data capture. Note that this is not to say that polling-based CDC doesn’t have its applications. If for instance your use case can be satisfied by propagating changes once per hour and it’s not a problem to miss intermediary versions of records that were valid in between, it can be perfectly fine.

But if you’re interested in capturing data changes in near real-time and making sure you don’t miss any change events (including deletions), then I’d very much recommend exploring the possibilities of log-based CDC as enabled by Debezium. The Debezium connectors do all the heavy lifting for you, i.e. you don’t have to deal with all the low-level specifics of the individual databases and the means of getting changes from their logs. Instead, you can consume the generic and largely unified change data events produced by Debezium.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/07/26/debezium-0-9-0-alpha1-released/index.html b/blog/2018/07/26/debezium-0-9-0-alpha1-released/index.html index a8cded0299..faf861c311 100644 --- a/blog/2018/07/26/debezium-0-9-0-alpha1-released/index.html +++ b/blog/2018/07/26/debezium-0-9-0-alpha1-released/index.html @@ -1 +1 @@ - Debezium 0.9 Alpha1 and 0.8.1 Released

Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. Update: the Docker images have now been uploaded and are ready for use under the tag 0.9.0.Alpha1 and the rolling 0.9 tag.

SQL Server Connector

Support for SQL Server had been on the wish list of Debezium users for a long time (the original issue was DBZ-40). Thanks to lots of basic infrastructure created while working on the Oracle connector, we were finally able to come up with a first preview of this new connector after a comparatively short time of development.

Just like the Oracle connector, the one for SQL Server is under active development and should be considered an incubating feature at this point. So for instance the structure of emitted change messages may change in upcoming releases. In terms of features, it supports initial snapshotting and capturing changes via SQL Server’s CDC functionality. There’s support for the most common column types, table whitelisting/blacklisting and more. The most significant missing feature is support for structural changes to tables while the connector is running. This is the next feature we’ll work on, and it’s planned to be delivered as part of the next 0.9 release (see DBZ-812).
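
To give an idea of what configuring the new connector looks like, here’s a minimal sketch of a connector configuration in Java; the connection values are placeholders and, since the connector is incubating, the exact option names should be verified against the current documentation.

import java.util.HashMap;
import java.util.Map;

public class SqlServerConnectorConfigSketch {

    // Builds a configuration for the (incubating) SQL Server connector against a CDC-enabled database.
    public static Map<String, String> connectorConfig() {
        Map<String, String> config = new HashMap<>();
        config.put("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
        // Placeholder connection settings
        config.put("database.hostname", "sqlserver.example.com");
        config.put("database.port", "1433");
        config.put("database.user", "sa");
        config.put("database.password", "Password!");
        config.put("database.dbname", "testDB");
        config.put("database.server.name", "server1");
        // Capture only selected tables
        config.put("table.whitelist", "dbo.customers,dbo.orders");
        // Topic for recording the history of schema changes
        config.put("database.history.kafka.bootstrap.servers", "kafka:9092");
        config.put("database.history.kafka.topic", "schema-changes.testdb");
        return config;
    }
}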

We’d be very happy to learn about any feedback you may have on this newest connector of the Debezium family. If you spot any bugs or have feature requests for it, please create a report in our JIRA tracker.

Oracle Connector

The Debezium connector for Oracle is able to take initial snapshots now. By means of the new connector option snapshot.mode you can control whether read events for all the records of all the captured tables should be emitted.

In addition, the support for numeric data types has been honed (DBZ-804); any integer columns (i.e. NUMBER with a scale <= 0) will be emitted using the corresponding int8/int16/int32/int64 field type, if the column’s precision allows for it.

We’ve also spent some time on expanding the Oracle connector documentation, which covers the structure of emitted change events and all the data type mappings in detail now.

Debezium 0.8.1.Final

Together with Debezium 0.9.0.Alpha1 we also did another release of the current stable Debezium version 0.8.

While 0.9 at this point is more interesting to those eager to try out the latest developments in the Oracle and SQL Server connectors, 0.8.1.Final is a recommended upgrade especially to the users of the Postgres connector. This release fixes an issue where it could happen that WAL segments on the server were retained longer than necessary, in case only records of non-whitelisted tables changed for a while. This has been addressed by means of supporting heartbeat messages (as already known from the MySQL connector) also for Postgres (DBZ-800). This lets the connector regularly commit offsets to Kafka Connect which also serves as the hook to acknowledge processed LSNs with the Postgres server.
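
For reference, enabling this only takes a single additional connector option. Here’s a minimal sketch of a Postgres connector configuration with heartbeats enabled; the connection values are placeholders and the interval is just an example.

import java.util.HashMap;
import java.util.Map;

public class PostgresHeartbeatConfigSketch {

    // Builds a Postgres connector configuration that emits periodic heartbeat messages.
    public static Map<String, String> connectorConfig() {
        Map<String, String> config = new HashMap<>();
        config.put("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        // Placeholder connection settings
        config.put("database.hostname", "postgres.example.com");
        config.put("database.port", "5432");
        config.put("database.user", "debezium");
        config.put("database.password", "dbz");
        config.put("database.dbname", "inventory");
        config.put("database.server.name", "dbserver1");
        // Emit a heartbeat message every 10 seconds, so offsets (and thus processed LSNs)
        // keep being committed even if only non-whitelisted tables change for a while.
        config.put("heartbeat.interval.ms", "10000");
        return config;
    }
}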

You can find the list of all changes done in Debezium 0.8.1.Final in the change log.

What’s next?

As discussed above, we’ll work on supporting structural changes to captured tables while the SQL Server connector is running. The same applies to the Oracle connector. This will require some work on our DDL parsers, but thanks to the foundations provided by our recent migration of the MySQL DDL parser to Antlr, this should be manageable.

The other big focus of work will be to provide an alternative implementation for getting changes from Oracle which isn’t based on the XStream API. We’ve done some experiments with LogMiner and are also actively exploring further alternatives. While some details are still unclear, we are optimistic that we’ll have something to release in this area soon.

If you’d like to learn more about some medium- and long-term ideas, please check out our roadmap. Also, please get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9 Alpha1 and 0.8.1 Released

Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. Update: the Docker images have now been uploaded and are ready for use under the tag 0.9.0.Alpha1 and the rolling 0.9 tag.

SQL Server Connector

Support for SQL Server had been on the wish list of Debezium users for a long time (the original issue was DBZ-40). Thanks to lots of basic infrastructure created while working on the Oracle connector, we were finally able to come up with a first preview of this new connector after a comparatively short time of development.

Just like the Oracle connector, the one for SQL Server is under active development and should be considered an incubating feature at this point. So for instance the structure of emitted change messages may change in upcoming releases. In terms of features, it supports initial snapshotting and capturing changes via SQL Server’s CDC functionality. There’s support for the most common column types, table whitelisting/blacklisting and more. The most significant missing feature is support for structural changes to tables while the connector is running. This is the next feature we’ll work on, and it’s planned to be delivered as part of the next 0.9 release (see DBZ-812).

We’d be very happy to learn about any feedback you may have on this newest connector of the Debezium family. If you spot any bugs or have feature requests for it, please create a report in our JIRA tracker.

Oracle Connector

The Debezium connector for Oracle is able to take initial snapshots now. By means of the new connector option snapshot.mode you can control whether read events for all the records of all the captured tables should be emitted.

In addition, the support for numeric data types has been honed (DBZ-804); any integer columns (i.e. NUMBER with a scale <= 0) will be emitted using the corresponding int8/int16/int32/int64 field type, if the column’s precision allows for it.

We’ve also spent some time on expanding the Oracle connector documentation, which covers the structure of emitted change events and all the data type mappings in detail now.

Debezium 0.8.1.Final

Together with Debezium 0.9.0.Alpha1 we also did another release of the current stable Debezium version 0.8.

While 0.9 at this point is more interesting to those eager to try out the latest developments in the Oracle and SQL Server connectors, 0.8.1.Final is a recommended upgrade especially to the users of the Postgres connector. This release fixes an issue where it could happen that WAL segments on the server were retained longer than necessary, in case only records of non-whitelisted tables changed for a while. This has been addressed by means of supporting heartbeat messages (as already known from the MySQL connector) also for Postgres (DBZ-800). This lets the connector regularly commit offsets to Kafka Connect which also serves as the hook to acknowledge processed LSNs with the Postgres server.

You can find the list of all changes done in Debezium 0.8.1.Final in the change log.

What’s next?

As discussed above, we’ll work on supporting structural changes to captured tables while the SQL Server connector is running. The same applies to the Oracle connector. This will require some work on our DDL parsers, but thanks to the foundations provided by our recent migration of the MySQL DDL parser to Antlr, this should be manageable.

The other big focus of work will be to provide an alternative implementation for getting changes from Oracle which isn’t based on the XStream API. We’ve done some experiments with LogMiner and are also actively exploring further alternatives. While some details are still unclear, we are optimistic that we’ll have something to release in this area soon.

If you’d like to learn more about some medium- and long-term ideas, please check out our roadmap. Also, please get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/08/30/debezium-0-8-2-released/index.html b/blog/2018/08/30/debezium-0-8-2-released/index.html index 6d45b62ba6..2a583ef981 100644 --- a/blog/2018/08/30/debezium-0-8-2-released/index.html +++ b/blog/2018/08/30/debezium-0-8-2-released/index.html @@ -1 +1 @@ - Debezium 0.8.2 Released

The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will now be handled correctly by the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

What’s next?

We’re continuing the work on Debezium 0.9, which will mostly be about improvements to the SQL Server and Oracle connectors. Both will get support for handling structural changes to captured tables while the connectors are running. Also the exploration of alternatives to using the XStream API for the Oracle connector continues.

Finally, a recurring theme of our work is to further consolidate the code bases of the different connectors, which will allow us to roll out new and improved features more quickly across all the Debezium connectors. The recently added Oracle and SQL Server connectors already share a lot of code, and in the next step we’ve planned to move the existing Postgres connector to the new basis established for these two connectors.

If you’d like to learn more about some medium- and long-term ideas, please check out our roadmap. Also, please get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.8.2 Released

The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will now be handled correctly by the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

What’s next?

We’re continuing the work on Debezium 0.9, which will mostly be about improvements to the SQL Server and Oracle connectors. Both will get support for handling structural changes to captured tables while the connectors are running. Also the exploration of alternatives to using the XStream API for the Oracle connector continues.

Finally, a recurring theme of our work is to further consolidate the code bases of the different connectors, which will allow us to roll out new and improved features more quickly across all the Debezium connectors. The recently added Oracle and SQL Server connectors already share a lot of code, and in the next step we’ve planned to move the existing Postgres connector to the new basis established for these two connectors.

If you’d like to learn more about some medium- and long-term ideas, please check out our roadmap. Also, please get in touch with us if you have any ideas or suggestions for future development.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/08/30/streaming-mysql-data-changes-into-kinesis/index.html b/blog/2018/08/30/streaming-mysql-data-changes-into-kinesis/index.html index 685b32d811..ceeaefbb61 100644 --- a/blog/2018/08/30/streaming-mysql-data-changes-into-kinesis/index.html +++ b/blog/2018/08/30/streaming-mysql-data-changes-into-kinesis/index.html @@ -170,4 +170,4 @@ "op": "u", "ts_ms": 1535627622546 } -}

Once you’re done, stop the embedded engine application by hitting Ctrl + C, stop the MySQL server by running docker stop mysql and delete the kinesis.inventory.customers stream in Kinesis.

Summary and Outlook

In this blog post we’ve demonstrated that Debezium can not only be used to stream data changes into Apache Kafka, but also into other streaming platforms such as Amazon Kinesis. By leveraging its embedded engine and implementing a bit of glue code, you can benefit from all the CDC connectors provided by Debezium and their capabilities and connect them to the streaming solution of your choice.
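
To give a feel for the shape of that glue code, here’s a stripped-down sketch of an application wiring the embedded engine to a callback; the actual Kinesis call is left out, all connection values are placeholders, and the API usage reflects the embedded engine as available in the 0.8 time frame.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.config.Configuration;
import io.debezium.embedded.EmbeddedEngine;

public class ChangeDataSenderSketch {

    public static void main(String[] args) {
        // Engine and connector settings; all connection values are placeholders.
        Configuration config = Configuration.create()
                .with("name", "kinesis-demo")
                .with("connector.class", "io.debezium.connector.mysql.MySqlConnector")
                .with("offset.storage", "org.apache.kafka.connect.storage.MemoryOffsetBackingStore")
                .with("offset.flush.interval.ms", "60000")
                .with("database.hostname", "localhost")
                .with("database.port", "3306")
                .with("database.user", "debezium")
                .with("database.password", "dbz")
                .with("database.server.id", "85744")
                .with("database.server.name", "kinesis")
                .with("database.whitelist", "inventory")
                .with("database.history", "io.debezium.relational.history.MemoryDatabaseHistory")
                .build();

        EmbeddedEngine engine = EmbeddedEngine.create()
                .using(config)
                .notifying(record -> {
                    // This is where the glue code forwards each change event to the
                    // target system, e.g. via the Kinesis client's putRecord operation.
                    System.out.println("Received change event: " + record);
                })
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine); // EmbeddedEngine implements Runnable
        Runtime.getRuntime().addShutdownHook(new Thread(engine::stop));
    }
}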

And we’re thinking about simplifying this usage of Debezium even further. Instead of requiring you to implement your own application that invokes the embedded engine API, we’re considering providing a small self-contained Debezium runtime which you can simply execute. It’d be configured with the source connector to run and would make use of an outbound plug-in SPI with ready-to-use implementations for Kinesis, Apache Pulsar and others. Of course such a runtime would also provide suitable implementations for safely persisting offsets and database history, and it’d offer means of monitoring, health checks etc. This would mean you could connect the Debezium source connectors with your preferred streaming platform in a robust and reliable way, without any manual coding required!

If you like this idea, then please check out JIRA issue DBZ-651 and let us know about your thoughts, e.g. by leaving a comment on the issue, in the comment section below or on our mailing list.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}

Once you’re done, stop the embedded engine application by hitting Ctrl + C, stop the MySQL server by running docker stop mysql and delete the kinesis.inventory.customers stream in Kinesis.

Summary and Outlook

In this blog post we’ve demonstrated that Debezium can not only be used to stream data changes into Apache Kafka, but also into other streaming platforms such as Amazon Kinesis. By leveraging its embedded engine and implementing a bit of glue code, you can benefit from all the CDC connectors provided by Debezium and their capabilities and connect them to the streaming solution of your choice.

And we’re thinking about simplifying this usage of Debezium even further. Instead of requiring you to implement your own application that invokes the embedded engine API, we’re considering providing a small self-contained Debezium runtime which you can simply execute. It’d be configured with the source connector to run and would make use of an outbound plug-in SPI with ready-to-use implementations for Kinesis, Apache Pulsar and others. Of course such a runtime would also provide suitable implementations for safely persisting offsets and database history, and it’d offer means of monitoring, health checks etc. This would mean you could connect the Debezium source connectors with your preferred streaming platform in a robust and reliable way, without any manual coding required!

If you like this idea, then please check out JIRA issue DBZ-651 and let us know about your thoughts, e.g. by leaving a comment on the issue, in the comment section below or on our mailing list.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/09/19/debezium-0-8-3-final-released/index.html b/blog/2018/09/19/debezium-0-8-3-final-released/index.html index db6227ad1b..6f7e1e43b6 100644 --- a/blog/2018/09/19/debezium-0-8-3-final-released/index.html +++ b/blog/2018/09/19/debezium-0-8-3-final-released/index.html @@ -7,4 +7,4 @@ false | Thomas | 1001 | Sally | sally.thomas@acme.com false | Bailey | 1002 | George | gbailey@foobar.com false | Kretchmar | 1004 | Anne | annek@noanswer.org -true | Walker | 1003 | Edward | ed@walker.com

You can then, for instance, use a batch job running on your sink to remove all records flagged as deleted.
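
As a sketch of what such a cleanup job could look like, assuming the sink is reachable via JDBC and the deletion flag ends up in a column named __deleted as in the example above (table, column and connection details here are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SoftDeleteCleanupJob {

    public static void main(String[] args) throws Exception {
        // Connection settings, table name and flag column are illustrative assumptions;
        // the flag may be stored as a string or a boolean depending on the sink mapping.
        try (Connection connection = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/inventorydb", "postgresuser", "postgrespw");
             Statement statement = connection.createStatement()) {
            int removed = statement.executeUpdate("DELETE FROM customers WHERE __deleted = 'true'");
            System.out.println("Removed " + removed + " soft-deleted row(s)");
        }
    }
}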

What’s next?

We’re continuing the work on Debezium 0.9, which will mostly be about improvements to the SQL Server and Oracle connectors. The current plan is to do the next 0.9 release (either Alpha2 or Beta1) in two weeks from now.

Also, it’s the beginning of the conference season, so we’ll spend some time preparing demos and presenting Debezium at multiple locations. There will be sessions on change data capture with Debezium at these conferences:

If you are at any of these conferences, come and say hi; we’d love to talk with you about your use cases, feature requests, feedback on our roadmap, and any other ideas around Debezium.

Finally, a big "Thank You" goes to our fantastic community members Andrey Pustovetov, Maciej Bryński and Peng Lyu for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +true | Walker | 1003 | Edward | ed@walker.com

You can then, for instance, use a batch job running on your sink to remove all records flagged as deleted.

What’s next?

We’re continuing the work on Debezium 0.9, which will mostly be about improvements to the SQL Server and Oracle connectors. The current plan is to do the next 0.9 release (either Alpha2 or Beta1) two weeks from now.

Also, it’s the beginning of the conference season, so we’ll spend some time preparing demos and presenting Debezium at multiple locations. There will be sessions on change data capture with Debezium at these conferences:

If you are at any of these conferences, come and say Hi; we’d love to talk with you about your use cases, feature requests, feedback on our roadmap and any other ideas around Debezium.

Finally, a big "Thank You" goes to our fantastic community members Andrey Pustovetov, Maciej Bryński and Peng Lyu for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/09/20/materializing-aggregate-views-with-hibernate-and-debezium/index.html b/blog/2018/09/20/materializing-aggregate-views-with-hibernate-and-debezium/index.html index 127872e6de..c2a03de291 100644 --- a/blog/2018/09/20/materializing-aggregate-views-with-hibernate-and-debezium/index.html +++ b/blog/2018/09/20/materializing-aggregate-views-with-hibernate-and-debezium/index.html @@ -212,4 +212,4 @@ }, "timed_out": false, "took": 11 - }

And there you have it: a customer’s complete data, including their addresses, categories, tags etc., materialized into a single document within Elasticsearch. If you’re using JPA to update the customer, you’ll see the data in the index being updated accordingly in near-realtime.

Pros and Cons

So what are the advantages and disadvantages of this approach for materializing aggregates from multiple source tables compared to the KStreams-based approach?

The big advantage is consistency and awareness of transactional boundaries, whereas the KStreams-based solution in its suggested form was prone to exposing intermediary aggregates. For instance, if you’re storing a customer and three addresses, it might happen that the streaming query first creates an aggregation of the customer and the two addresses inserted first, and shortly thereafter the complete aggregate with all three addresses. This is not the case for the approach discussed here, as you’ll only ever stream complete aggregates to Kafka. This approach also feels a bit more "light-weight", i.e. a simple marker annotation (together with some Jackson annotations for fine-tuning the emitted JSON structures) is enough to materialize aggregates from your domain model, whereas some more effort was needed to set up the required streams, temporary tables etc. with the KStreams solution.

The downside of driving aggregations through the application layer is that it’s not fully agnostic to the way you access the primary data. If you bypass the application, e.g. by patching data directly in the database, these updates would naturally be missed, requiring a refresh of affected aggregates. That said, this again could be done through change data capture and Debezium: change events to source tables could be captured and consumed by the application itself, allowing it to re-materialize aggregates after external data changes. You might also argue that running JSON serializations within source transactions and storing aggregates within the source database represents some overhead. This may often be acceptable, though.

Another question to ask is what’s the advantage of using change data capture on an intermediary aggregate table over simply posting REST requests to Elasticsearch. The answer is the highly increased robustness and fault tolerance. If the Elasticsearch cluster can’t be accessed for some reason, the machinery of Kafka and Kafka Connect will ensure that any change events will be propagated eventually, once the sink is up again. Also other consumers than Elasticsearch can subscribe to the aggregate topic, the log can be replayed from the beginning etc.

Note that while we’ve been talking primarily about using Elasticsearch as a data sink, there are also other datastores and connectors that support complexly structured records. One example would be MongoDB and the sink connector maintained by Hans-Peter Grahsl, which one could use to sink customer aggregates into MongoDB, for instance enabling efficient retrieval of a customer and all their associated data with a single primary key look-up.

Outlook

The Hibernate ORM extension as well as the SMT discussed in this post can be found in our examples repository. They should be considered to be at "proof-of-concept" level currently.

That being said, we’re considering making this a proper Debezium component, allowing you to employ this aggregation approach within your Hibernate-based applications just by pulling in this new component. For that we’d have to improve a few things first, though. Most importantly, an API is needed which will let you (re-)create aggregates on demand, e.g. for existing data or for data updated by bulk updates via the Criteria API / JPQL (which will be missed by listeners). Aggregates should also be re-created automatically if any of the referenced entities change (with the current PoC, only a change to the customer instance itself will trigger its aggregate view to be rebuilt, but not a change to one of its addresses).

If you like this idea, then let us know about it, so we can gauge the general interest in this. Also, this would be a great item to work on, if you’re interested in contributing to the Debezium project. Looking forward to hearing from you, e.g. in the comment section below or on our mailing list.

Thanks a lot to Hans-Peter Grahsl for his feedback on an earlier version of this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + }

And there you have it: a customer’s complete data, including their addresses, categories, tags etc., materialized into a single document within Elasticsearch. If you’re using JPA to update the customer, you’ll see the data in the index being updated accordingly in near-realtime.

Pros and Cons

So what are the advantages and disadvantages of this approach for materializing aggregates from multiple source tables compared to the KStreams-based approach?

The big advantage is consistency and awareness of transactional boundaries, whereas the KStreams-based solution in its suggested form was prone to exposing intermediary aggregates. For instance, if you’re storing a customer and three addresses, it might happen that the streaming query first creates an aggregation of the customer and the two addresses inserted first, and shortly thereafter the complete aggregate with all three addresses. This is not the case for the approach discussed here, as you’ll only ever stream complete aggregates to Kafka. This approach also feels a bit more "light-weight", i.e. a simple marker annotation (together with some Jackson annotations for fine-tuning the emitted JSON structures) is enough to materialize aggregates from your domain model, whereas some more effort was needed to set up the required streams, temporary tables etc. with the KStreams solution.

The downside of driving aggregations through the application layer is that it’s not fully agnostic to the way you access the primary data. If you bypass the application, e.g. by patching data directly in the database, these updates would naturally be missed, requiring a refresh of affected aggregates. That said, this again could be done through change data capture and Debezium: change events to source tables could be captured and consumed by the application itself, allowing it to re-materialize aggregates after external data changes. You might also argue that running JSON serializations within source transactions and storing aggregates within the source database represents some overhead. This may often be acceptable, though.

Another question to ask is what’s the advantage of using change data capture on an intermediary aggregate table over simply posting REST requests to Elasticsearch. The answer is the highly increased robustness and fault tolerance. If the Elasticsearch cluster can’t be accessed for some reason, the machinery of Kafka and Kafka Connect will ensure that any change events will be propagated eventually, once the sink is up again. Also other consumers than Elasticsearch can subscribe to the aggregate topic, the log can be replayed from the beginning etc.

Note that while we’ve been talking primarily about using Elasticsearch as a data sink, there are also other datastores and connectors that support complexly structured records. One example would be MongoDB and the sink connector maintained by Hans-Peter Grahsl, which one could use to sink customer aggregates into MongoDB, for instance enabling efficient retrieval of a customer and all their associated data with a single primary key look-up.

Outlook

The Hibernate ORM extension as well as the SMT discussed in this post can be found in our examples repository. They should be considered to be at "proof-of-concept" level currently.

That being said, we’re considering making this a proper Debezium component, allowing you to employ this aggregation approach within your Hibernate-based applications just by pulling in this new component. For that we’d have to improve a few things first, though. Most importantly, an API is needed which will let you (re-)create aggregates on demand, e.g. for existing data or for data updated by bulk updates via the Criteria API / JPQL (which will be missed by listeners). Aggregates should also be re-created automatically if any of the referenced entities change (with the current PoC, only a change to the customer instance itself will trigger its aggregate view to be rebuilt, but not a change to one of its addresses).

If you like this idea, then let us know about it, so we can gauge the general interest in this. Also, this would be a great item to work on, if you’re interested in contributing to the Debezium project. Looking forward to hearing from you, e.g. in the comment section below or on our mailing list.

Thanks a lot to Hans-Peter Grahsl for his feedback on an earlier version of this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/10/04/debezium-0-9-0-alpha2-released/index.html b/blog/2018/10/04/debezium-0-9-0-alpha2-released/index.html index cf7951eaae..21a37bbf10 100644 --- a/blog/2018/10/04/debezium-0-9-0-alpha2-released/index.html +++ b/blog/2018/10/04/debezium-0-9-0-alpha2-released/index.html @@ -1 +1 @@ - Debezium 0.9.0.Alpha2 Released

It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

Kafka Upgrade

Debezium runs with and has been tested on top of the recently released Apache Kafka 2.0 (DBZ-858). The widely used version Kafka 1.x continues to be supported as well.

Note that 0.10.x is not supported due to Debezium’s usage of the admin client API which is only available in later versions. It shouldn’t be too hard to work around this, so if someone is interested in helping out with this, this would be a great contribution (see DBZ-883).

Support for HSTORE columns in Postgres

Postgres is an amazingly powerful and flexible RDBMS, not least due to its wide range of column types which go far beyond what’s defined by the SQL standard. One of these types is HSTORE, which is essentially a string-to-string map.

Debezium can now capture changes to columns of this type (DBZ-898). By default, the field values will be represented using Kafka Connect’s map data type. As this may not be supported by all sink connectors, you might alternatively represent them as string-ified JSON by setting the new hstore.handling.mode connector option to json. In this case, you’d see HSTORE columns represented as values in change messages like so: { "key1" : "val1", "key2" : "val2" }.
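
As a small sketch, enabling the JSON representation amounts to a single connector option; shown here as Java properties, with all other connector settings omitted:

import java.util.Properties;

public class HstoreConfigExample {

    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        // Emit HSTORE columns as string-ified JSON instead of Kafka Connect's map type
        config.setProperty("hstore.handling.mode", "json");
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}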

Field filtering and renaming for MongoDB

Unlike the connectors for MySQL and Postgres, the Debezium MongoDB connector so far didn’t allow excluding single fields of captured collections from CDC messages. Renaming them also wasn’t supported, e.g. by means of Kafka’s ReplaceField SMT. The reason is that MongoDB doesn’t mandate a fixed schema for the documents of a given collection, and documents are therefore represented in change messages using a single string-ified JSON field.

Thanks to the fantastic work of community member Andrey Pustovetov, this has finally changed: you can now remove given fields from the CDC messages of given collections (DBZ-633) or have them renamed (DBZ-881). Please refer to the description of the new connector options field.blacklist and field.renames in the MongoDB connector documentation to learn more.
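
For illustration, the two options might be set like this (the field paths and the new name below are invented examples; please check the connector documentation for the exact value format):

import java.util.Properties;

public class MongoFieldFilterExample {

    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
        // Exclude a sensitive field from change messages (example path: <database>.<collection>.<field>)
        config.setProperty("field.blacklist", "inventory.customers.ssn");
        // Rename a field in the emitted messages (example: <database>.<collection>.<field>:<new name>)
        config.setProperty("field.renames", "inventory.customers.name:fullname");
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}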

Extended source info

Another contribution by Andrey is the new optional connector field within the source info block of CDC messages (DBZ-918). This tells the type of source connector that produced the messages ("mysql", "postgres" etc.), which can come in handy in cases where specific semantics need to be applied on the consumer side depending on the type of source database.

Bug fixes and version upgrades

The new release contains a good number of bug fixes and other smaller improvements. Amongst them are

  • correct handling of invalid temporal default values with MySQL (DBZ-927),

  • support for table/collection names with special characters for MySQL (DBZ-878) and MongoDB (DBZ-865) and

  • fixed handling of blacklisted tables with the new Antlr-based DDL parser (DBZ-872).

Community member Ian Axelrod provided a fix for a potential performance issue, where changes to tables with TOAST columns in Postgres would cause repeated updates to the connector’s internal schema metadata, which can be a costly operation (DBZ-911). Please refer to the Postgres connector documentation for details on the new schema.refresh.mode option, which deals with this issue.

In terms of version upgrades we migrated to the latest releases of the MySQL (DBZ-763, DBZ-764) and Postgres drivers (DBZ-912). The former is part of a longer stream of work leading towards support of MySQL 8 which should be finished in one of the next Debezium releases. For Postgres we provide a Docker image with Debezium’s supported logical decoding plug-ins based on Alpine now, which might be interesting to those concerned about container size (DBZ-705).

Please see the change log for the complete list of fixed issues.

What’s next?

The work towards Debezium 0.9 continues, and we’ll focus mostly on improvements to the SQL Server and Oracle connectors. Other potential topics include support for MySQL 8 and native logical decoding as introduced with Postgres 10, which should greatly help with using the Debezium Postgres connectors in cloud environments such as Amazon RDS.

We’ll also be talking about Debezium at the following conferences:

Just last week I had the opportunity to present Debezium at JUG Saxony Day. If you are interested, you can find the (German) slide set of that talk on Speaker Deck.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.0.Alpha2 Released

It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

Kafka Upgrade

Debezium runs with and has been tested on top of the recently released Apache Kafka 2.0 (DBZ-858). The widely used version Kafka 1.x continues to be supported as well.

Note that 0.10.x is not supported due to Debezium’s usage of the admin client API which is only available in later versions. It shouldn’t be too hard to work around this, so if someone is interested in helping out with this, this would be a great contribution (see DBZ-883).

Support for HSTORE columns in Postgres

Postgres is an amazingly powerful and flexible RDBMS, not least due to its wide range of column types which go far beyond what’s defined by the SQL standard. One of these types is HSTORE, which is essentially a string-to-string map.

Debezium can now capture changes to columns of this type (DBZ-898). By default, the field values will be represented using Kafka Connect’s map data type. As this may not be supported by all sink connectors, you might alternatively represent them as string-ified JSON by setting the new hstore.handling.mode connector option to json. In this case, you’d see HSTORE columns represented as values in change messages like so: { "key1" : "val1", "key2" : "val2" }.

Field filtering and renaming for MongoDB

Unlike the connectors for MySQL and Postgres, the Debezium MongoDB connector so far didn’t allow excluding single fields of captured collections from CDC messages. Renaming them also wasn’t supported, e.g. by means of Kafka’s ReplaceField SMT. The reason is that MongoDB doesn’t mandate a fixed schema for the documents of a given collection, and documents are therefore represented in change messages using a single string-ified JSON field.

Thanks to the fantastic work of community member Andrey Pustovetov, this has finally changed: you can now remove given fields from the CDC messages of given collections (DBZ-633) or have them renamed (DBZ-881). Please refer to the description of the new connector options field.blacklist and field.renames in the MongoDB connector documentation to learn more.

Extended source info

Another contribution by Andrey is the new optional connector field within the source info block of CDC messages (DBZ-918). This tells the type of source connector that produced the messages ("mysql", "postgres" etc.), which can come in handy in cases where specific semantics need to be applied on the consumer side depending on the type of source database.

Bug fixes and version upgrades

The new release contains a good number of bug fixes and other smaller improvements. Amongst them are

  • correct handling of invalid temporal default values with MySQL (DBZ-927),

  • support for table/collection names with special characters for MySQL (DBZ-878) and MongoDB (DBZ-865) and

  • fixed handling of blacklisted tables with the new Antlr-based DDL parser (DBZ-872).

Community member Ian Axelrod provided a fix for a potential performance issue, where changes to tables with TOAST columns in Postgres would cause repeated updates to the connector’s internal schema metadata, which can be a costly operation (DBZ-911). Please refer to the Postgres connector documentation for details on the new schema.refresh.mode option, which deals with this issue.

In terms of version upgrades we migrated to the latest releases of the MySQL (DBZ-763, DBZ-764) and Postgres drivers (DBZ-912). The former is part of a longer stream of work leading towards support of MySQL 8 which should be finished in one of the next Debezium releases. For Postgres we provide a Docker image with Debezium’s supported logical decoding plug-ins based on Alpine now, which might be interesting to those concerned about container size (DBZ-705).

Please see the change log for the complete list of fixed issues.

What’s next?

The work towards Debezium 0.9 continues, and we’ll focus mostly on improvements to the SQL Server and Oracle connectors. Other potential topics include support for MySQL 8 and native logical decoding as introduced with Postgres 10, which should greatly help with using the Debezium Postgres connectors in cloud environments such as Amazon RDS.

We’ll also be talking about Debezium at the following conferences:

Just last week I had the opportunity to present Debezium at JUG Saxony Day. If you are interested, you can find the (German) slide set of that talk on Speaker Deck.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/11/22/debezium-0-9-0-beta1-released/index.html b/blog/2018/11/22/debezium-0-9-0-beta1-released/index.html index 8812cd8646..7b95a47ecd 100644 --- a/blog/2018/11/22/debezium-0-9-0-beta1-released/index.html +++ b/blog/2018/11/22/debezium-0-9-0-beta1-released/index.html @@ -1 +1 @@ - Debezium 0.9.0.Beta1 Released

It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

  • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

  • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

  • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

Besides that, we spent some time on supporting the latest versions of the different databases. The Debezium connectors now support Postgres 11 (DBZ-955) and MongoDB 4.0 (DBZ-974). We are also working on supporting MySQL 8.0, which should be completed in the next 0.9.x release. The Debezium container images have been updated to Kafka 2.0.1 (DBZ-979) and the Kafka Connect image now supports the STATUS_STORAGE_TOPIC environment variable, bringing consistency with CONFIG_STORAGE_TOPIC and OFFSET_STORAGE_TOPIC that already were supported before (DBZ-893).

As usual, several bugs were fixed, too. Several of them dealt with the new Antlr-based DDL parser for the MySQL connector. By now we feel confident about its implementation, so it’s the default DDL parser as of this release (DBZ-757). If you would like to continue to use the legacy parser for some reason, you can do so by setting the ddl.parser.mode connector option to "legacy". This implementation will remain available in the lifetime of Debezium 0.9.x and is scheduled for removal after that. So please make sure to log issues in JIRA should you run into any problems with the Antlr parser.

Overall, this release contains 21 fixes. Thanks a lot to all the community members who helped with making this happen: Anton Martynov, Deepak Barr, Grzegorz Kołakowski, Olavi Mustanoja, Renato Mefi, Sagar Rao and Shivam Sharma!

What else?

While the work towards Debezium 0.9 continues, we’ve lately been quite busy with presenting Debezium at multiple conferences. You can find the slides and recordings from Kafka Summit San Francisco and Voxxed Days Microservices on our list of online resources around Debezium.

There you can also find the links to the slides of the great talk "The Why’s and How’s of Database Streaming" by Joy Gao of WePay, a Debezium user from the very beginning, as well as the link to a blog post by Hans-Peter Grahsl about setting up a CDC pipeline from MySQL into Cosmos DB running on Azure. If you know about other great articles, session recordings or similar on Debezium and change data capture which should be added there, please let us know.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.0.Beta1 Released

It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

  • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

  • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

  • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

Besides that, we spent some time on supporting the latest versions of the different databases. The Debezium connectors now support Postgres 11 (DBZ-955) and MongoDB 4.0 (DBZ-974). We are also working on supporting MySQL 8.0, which should be completed in the next 0.9.x release. The Debezium container images have been updated to Kafka 2.0.1 (DBZ-979) and the Kafka Connect image now supports the STATUS_STORAGE_TOPIC environment variable, bringing consistency with CONFIG_STORAGE_TOPIC and OFFSET_STORAGE_TOPIC that already were supported before (DBZ-893).

As usual, several bugs were fixed, too. Several of them dealt with the new Antlr-based DDL parser for the MySQL connector. By now we feel confident about its implementation, so it’s the default DDL parser as of this release (DBZ-757). If you would like to continue to use the legacy parser for some reason, you can do so by setting the ddl.parser.mode connector option to "legacy". This implementation will remain available in the lifetime of Debezium 0.9.x and is scheduled for removal after that. So please make sure to log issues in JIRA should you run into any problems with the Antlr parser.

Overall, this release contains 21 fixes. Thanks a lot to all the community members who helped with making this happen: Anton Martynov, Deepak Barr, Grzegorz Kołakowski, Olavi Mustanoja, Renato Mefi, Sagar Rao and Shivam Sharma!

What else?

While the work towards Debezium 0.9 continues, we’ve lately been quite busy with presenting Debezium at multiple conferences. You can find the slides and recordings from Kafka Summit San Francisco and Voxxed Days Microservices on our list of online resources around Debezium.

There you can also find the links to the slides of the great talk "The Why’s and How’s of Database Streaming" by Joy Gao of WePay, a Debezium user from the very beginning, as well as the link to a blog post by Hans-Peter Grahsl about setting up a CDC pipeline from MySQL into Cosmos DB running on Azure. If you know about other great articles, session recordings or similar on Debezium and change data capture which should be added there, please let us know.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/12/05/automating-cache-invalidation-with-change-data-capture/index.html b/blog/2018/12/05/automating-cache-invalidation-with-change-data-capture/index.html index 9aa58742f6..c58ee6e69f 100644 --- a/blog/2018/12/05/automating-cache-invalidation-with-change-data-capture/index.html +++ b/blog/2018/12/05/automating-cache-invalidation-with-change-data-capture/index.html @@ -204,4 +204,4 @@ }

And with that, you have all the pieces in place: cached Items will only be evicted after external data changes, but not after changes done by the application itself. To confirm, you can invoke the example’s items resource using curl:

> curl -H "Content-Type: application/json" \
   -X PUT \
   --data '{ "description" : "North by Northwest", "price" : 20.99}' \
-  http://localhost:8080/cache-invalidation/rest/items/10003

When placing the next order for the item after this update, you should see that the Item entity is obtained from the cache, i.e. the change event will not have caused the item’s cache entry to be evicted. In contrast, if you update the item’s price via psql another time, the item should be removed from the cache and the order request will produce a cache miss, followed by a SELECT against the item table in the database.

Summary

In this blog post we’ve explored how Debezium and change data capture can be employed to invalidate application-level caches after external data changes. Compared to manual cache invalidation, this approach works very reliably (by capturing changes directly from the database log, no events will be missed) and fast (cache eviction happens in near-realtime after the data changes).

As you have seen, not much glue code is needed to implement this. While the shown implementation is somewhat specific to the entities of the example, it should be possible to implement the change event handler in a more generic fashion, so that it can handle a set of configured entity types (essentially, the database change listener would have to convert the primary key field(s) from the change events into the primary key type of the corresponding entities in a generic way). Such a generic implementation would also have to provide the logic for obtaining the current transaction id for the most commonly used databases.
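
To make that idea a bit more concrete, here is a purely hypothetical shape such a generic handler could take (all type and method names below are invented for illustration; only JPA’s standard Cache#evict() is used for the actual eviction):

import javax.persistence.EntityManagerFactory;

import org.apache.kafka.connect.data.Struct;

// Hypothetical contract for mapping change events back to cached entities
interface CacheInvalidationMapping {

    // Resolve the affected entity class from the change event's topic name
    Class<?> entityType(String topic);

    // Convert the event's key structure into the entity's primary key type (e.g. Long, UUID, composite key)
    Object primaryKey(Class<?> entityType, Struct eventKey);
}

class GenericCacheInvalidator {

    private final EntityManagerFactory emf;
    private final CacheInvalidationMapping mapping;

    GenericCacheInvalidator(EntityManagerFactory emf, CacheInvalidationMapping mapping) {
        this.emf = emf;
        this.mapping = mapping;
    }

    // Called for each change event that originates from an external transaction
    void onExternalChange(String topic, Struct eventKey) {
        Class<?> type = mapping.entityType(topic);
        emf.getCache().evict(type, mapping.primaryKey(type, eventKey));
    }
}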

Please let us know whether you think this would be an interesting extension to have for Debezium and Hibernate ORM. For instance this could be a new module under the Debezium umbrella, and it could also be a great project to work on, should you be interested in contributing to Debezium. If you have any thoughts on this idea, please post a comment below or come to our mailing list.

Many thanks to Guillaume Smet, Hans-Peter Grahsl and Jiri Pechanec for their feedback while writing this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + http://localhost:8080/cache-invalidation/rest/items/10003

When placing the next order for the item after this update, you should see that the Item entity is obtained from the cache, i.e. the change event will not have caused the item’s cache entry to be evicted. In contrast, if you update the item’s price via psql another time, the item should be removed from the cache and the order request will produce a cache miss, followed by a SELECT against the item table in the database.

Summary

In this blog post we’ve explored how Debezium and change data capture can be employed to invalidate application-level caches after external data changes. Compared to manual cache invalidation, this approach works very reliably (by capturing changes directly from the database log, no events will be missed) and fast (cache eviction happens in near-realtime after the data changes).

As you have seen, not much glue code is needed to implement this. While the shown implementation is somewhat specific to the entities of the example, it should be possible to implement the change event handler in a more generic fashion, so that it can handle a set of configured entity types (essentially, the database change listener would have to convert the primary key field(s) from the change events into the primary key type of the corresponding entities in a generic way). Such a generic implementation would also have to provide the logic for obtaining the current transaction id for the most commonly used databases.

Please let us know whether you think this would be an interesting extension to have for Debezium and Hibernate ORM. For instance this could be a new module under the Debezium umbrella, and it could also be a great project to work on, should you be interested in contributing to Debezium. If you have any thoughts on this idea, please post a comment below or come to our mailing list.

Many thanks to Guillaume Smet, Hans-Peter Grahsl and Jiri Pechanec for their feedback while writing this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2018/12/19/debezium-0-9-0-beta2-released/index.html b/blog/2018/12/19/debezium-0-9-0-beta2-released/index.html index 6216dd6b47..759ed0995c 100644 --- a/blog/2018/12/19/debezium-0-9-0-beta2-released/index.html +++ b/blog/2018/12/19/debezium-0-9-0-beta2-released/index.html @@ -1 +1 @@ - Debezium 0.9.0.Beta2 Released

With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

Monitoring and Metrics for the SQL Server and Oracle Connectors

Following the example of the MySQL connector, the connectors for SQL Server and Oracle now expose a range of metrics for monitoring purposes via JMX (DBZ-978). This includes values like the time since the last CDC event, offset of the last event, the total number of events, remaining and already scanned tables while doing a snapshot and much more. Please see the monitoring documentation for details on how to enable JMX. The following image shows an example of displaying the values in OpenJDK’s Mission Control tool:

Monitoring the Debezium SQL Server connector
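
If you prefer reading these values programmatically rather than in a GUI, they are also reachable via standard JMX. A minimal sketch (run it inside the Kafka Connect JVM or adapt it to use a remote JMX connection; the exact MBean names and attributes depend on the connector type and version, and error handling is omitted for brevity):

import java.lang.management.ManagementFactory;

import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class DebeziumMetricsDump {

    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Debezium connector metrics are registered under domains such as debezium.mysql or debezium.sql_server
        for (ObjectName name : server.queryNames(new ObjectName("debezium.*:*"), null)) {
            System.out.println(name);
            for (MBeanAttributeInfo attribute : server.getMBeanInfo(name).getAttributes()) {
                System.out.println("  " + attribute.getName() + " = " + server.getAttribute(name, attribute.getName()));
            }
        }
    }
}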

We’re planning to expand the set of exposed metrics in future versions and also make them available for Postgres and MongoDB. Please let us know about the metrics you’d like to see by commenting on JIRA issue DBZ-1040.

As a bonus, we’ve also created a Grafana dashboard for visualizing all the relevant metrics:

Connector metrics in Grafana

We’ll blog about monitoring and the dashboard in more detail soon; but if you are interested, you already can take a look at this demo in our examples repository.

Misc. Features

The "snapshot.delay.ms" option already known from the Debezium MySQL connector is now available for all other Debezium connectors, too (DBZ-966). This comes in handy when deploying multiple connectors to a Kafka Connect cluster, which may cause rebalancing the connectors in the cluster, interrupting and restarting running snapshots of already deployed connector instances. This can be avoided by specifying a delay which allows to wait with the snapshotting until the rebalancing phase is completed.

The MongoDB CDC Event Flattening transformation received a number of improvements:

  • Support for MongoDB’s $unset operator (DBZ-612)

  • Support for full document updates (DBZ-987)

  • New option for dropping delete and tombstone messages (DBZ-563)

  • Option to convey the original type of operation as a header parameter (DBZ-971); that option is also available for the Flattening SMT for the relational connectors and can be useful in case sink connectors need to differentiate between inserts and updates

Bug fixes

As always, we’ve also fixed a good number of bugs reported by Debezium users. The set of fixed issues includes:

  • Several bugs related to streaming changes from MySQL in GTID mode (DBZ-923, DBZ-1005, DBZ-1008)

  • Handling of tables with reserved names in the SQL Server connector (DBZ-1031)

  • Potential event loss after MySQL connector restart (DBZ-1033)

  • Unchanged values of TOASTed columns caused the Postgres connector to fail (DBZ-842)

Please see the change log for the complete list of addressed issues.

Next Steps

We’re planning to do a candidate release of Debezium 0.9 in early January. Provided no critical issues show up, Debezium 0.9.0.Final should be out by the end of January. For the CR we’ve mostly scheduled a number of further bug fixes, improvements to the SQL Server connector and the addition of further metrics.

In parallel, we’ll focus our attention on the Oracle connector again, finally getting back to the long-awaited LogMiner-based capture implementation (DBZ-137). This will be a primary feature of Debezium 0.10.

In addition, we’ll spend some cycles on the blogging and demo side of things; namely we’re thinking about writing on and demoing the new monitoring and metrics support, HA architectures including failover with MySQL, HAProxy and Debezium, as well as enriching CDC events with contextual information such as the current user or use case identifiers. Stay tuned!

Also going beyond 0.10, we have some great plans for Debezium in the coming year. If you’d like to bring in your ideas, too, please let us know on the mailing list or in the comments below; we’re looking forward to hearing from you.

And with that, all that remains to be said, is "Happy Festivus for the rest of us!"

Happy change data streaming and see you in 2019!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.0.Beta2 Released

With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

Monitoring and Metrics for the SQL Server and Oracle Connectors

Following the example of the MySQL connector, the connectors for SQL Server and Oracle now expose a range of metrics for monitoring purposes via JMX (DBZ-978). This includes values like the time since the last CDC event, offset of the last event, the total number of events, remaining and already scanned tables while doing a snapshot and much more. Please see the monitoring documentation for details on how to enable JMX. The following image shows an example of displaying the values in OpenJDK’s Mission Control tool:

Monitoring the Debezium SQL Server connector

We’re planning to expand the set of exposed metrics in future versions and also make them available for Postgres and MongoDB. Please let us know about the metrics you’d like to see by commenting on JIRA issue DBZ-1040.

As a bonus, we’ve also created a Grafana dashboard for visualizing all the relevant metrics:

Connector metrics in Grafana

We’ll blog about monitoring and the dashboard in more detail soon; but if you are interested, you already can take a look at this demo in our examples repository.

Misc. Features

The "snapshot.delay.ms" option already known from the Debezium MySQL connector is now available for all other Debezium connectors, too (DBZ-966). This comes in handy when deploying multiple connectors to a Kafka Connect cluster, which may cause rebalancing the connectors in the cluster, interrupting and restarting running snapshots of already deployed connector instances. This can be avoided by specifying a delay which allows to wait with the snapshotting until the rebalancing phase is completed.

The MongoDB CDC Event Flattening transformation received a number of improvements:

  • Support for MongoDB’s $unset operator (DBZ-612)

  • Support for full document updates (DBZ-987)

  • New option for dropping delete and tombstone messages (DBZ-563)

  • Option to convey the original type of operation as a header parameter (DBZ-971); that option is also available for the Flattening SMT for the relational connectors and can be useful in case sink connectors need to differentiate between inserts and updates

Bug fixes

As always, we’ve also fixed a good number of bugs reported by Debezium users. The set of fixed issues includes:

  • Several bugs related to streaming changes from MySQL in GTID mode (DBZ-923, DBZ-1005, DBZ-1008)

  • Handling of tables with reserved names in the SQL Server connector (DBZ-1031)

  • Potential event loss after MySQL connector restart (DBZ-1033)

  • Unchanged values of TOASTed columns caused the Postgres connector to fail (DBZ-842)

Please see the change log for the complete list of addressed issues.

Next Steps

We’re planning to do a candidate release of Debezium 0.9 in early January. Provided no critical issues show up, Debezium 0.9.0.Final should be out by the end of January. For the CR we’ve mostly scheduled a number of further bug fixes, improvements to the SQL Server connector and the addition of further metrics.

In parallel, we’ll focus our attention on the Oracle connector again, finally getting back to the long-awaited LogMiner-based capture implementation (DBZ-137). This will be a primary feature of Debezium 0.10.

In addition, we’ll spend some cycles on the blogging and demo side of things; namely, we’re thinking about writing about and demoing the new monitoring and metrics support, HA architectures including failover with MySQL, HAProxy and Debezium, as well as enriching CDC events with contextual information such as the current user or use case identifiers. Stay tuned!

Also, going beyond 0.10, we’ve got some great plans for Debezium in the coming year. If you’d like to bring in your ideas, too, please let us know on the mailing list or in the comments below; we’re looking forward to hearing from you.

And with that, all that remains to be said, is "Happy Festivus for the rest of us!"

Happy change data streaming and see you in 2019!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/01/28/debezium-0-9-0-cr1-released/index.html b/blog/2019/01/28/debezium-0-9-0-cr1-released/index.html index 0b44d956fa..1476f34193 100644 --- a/blog/2019/01/28/debezium-0-9-0-cr1-released/index.html +++ b/blog/2019/01/28/debezium-0-9-0-cr1-released/index.html @@ -1 +1 @@ - Debezium 0.9.0.CR1 Released

Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as a schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

SQL Server Connector Improvements

The SQL Server connector supports blacklisting of specific columns now (DBZ-1067). That’s useful in cases where you’d like to exclude specific columns from emitted change data messages, e.g. due to data protection considerations.

The "snapshot locking mode" option has been reworked (DBZ-947) and is named "snapshot isolation mode" now, better reflecting its semantics. A new mode "repeatable_read" has been added, and "none" has been renamed to "read_uncommitted". Please see the connector documentation and the migration notes for more details.

The connector allows for a much higher throughput now, thanks to caching of timestamps for the same LSN (DBZ-1078). Please refer to the change log for details on bugs fixed in this connector. A massive "Thank You" is in order for Grzegorz Kołakowski, for his tireless work on and testing of this connector!

New Embedded Engine Handler Interface

Debezium’s embedded engine now comes with a new interface ChangeConsumer, which event handlers can implement if they’d like to process change events in batches (DBZ-1080). That can result in substantial performance improvements when pushing change events to APIs that apply batch semantics themselves, such as the Kinesis Producer Library. You can learn more in the embedded engine docs.

Misc. Changes and Bug Fixes

All the relational connectors now allow propagating the scale of numeric columns as a schema parameter (DBZ-1073). This is controlled via the column.propagate.source.type option and builds on the exposure of type name and width added in Debezium 0.8. All these schema parameters can be used when creating the schema of corresponding tables in sink databases.

Debezium’s container image for Apache Kafka now allows creating and watching topics (DBZ-1057). You can also specify a clean-up policy when creating a topic (DBZ-1038).

The Debezium MySQL connector now handles unsigned SMALLINT columns as expected (DBZ-1063). For nullable columns with a default value, NULL values are correctly exported (DBZ-1064; previously, the default value would have been exported in that case).

The Postgres connector handles tables without a primary key correctly now (DBZ-1029). We’ve also applied a fix to make sure that the connector works with Postgres on Amazon RDS, which was recently broken due to an update of wal2json in RDS (DBZ-1083). Going forward, we’re planning to set up CI jobs to test against Postgres on RDS in all the versions supported by the Debezium connector. This will help us to spot similar issues early on and react quickly.

Please see the change log for the complete list of all addressed issues.

This release wouldn’t have been possible without all the contributions by the following members of the Debezium community: Addison Higham, Amit Sela, Gagan Agrawal, Grzegorz Kołakowski, Ilia Bogdanov, Ivan Kovbas, Moira Tagle, Renato Mefi and Tony Rizko.

Thanks a lot!

Next Steps

The CR1 release took us a bit longer than anticipated. The release of Debezium 0.9.0.Final will therefore be moved to early February. Rather quickly thereafter we’re planning to release Debezium 0.9.1, which will provide improvements and potential bugfixes to the features added in 0.9.

For further plans beyond that, check out our road map. If you have any feedback or suggestions for future additions, please get in touch via the mailing list or in the comments below.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.0.CR1 Released

Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as a schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

SQL Server Connector Improvements

The SQL Server connector supports blacklisting of specific columns now (DBZ-1067). That’s useful in cases where you’d like to exclude specific columns from emitted change data messages, e.g. due to data protection considerations.

The "snapshot locking mode" option has been reworked (DBZ-947) and is named "snapshot isolation mode" now, better reflecting its semantics. A new mode "repeatable_read" has been added, and "none" has been renamed to "read_uncommitted". Please see the connector documentation and the migration notes for more details.
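
Putting the new column blacklisting and snapshot isolation options together, a connector registration could look roughly like the following sketch; all connection values and the blacklisted columns are purely illustrative placeholders, not taken from this post:

  {
    "name": "sqlserver-inventory-connector",
    "config": {
      "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
      "database.hostname": "sqlserver",
      "database.port": "1433",
      "database.user": "sa",
      "database.password": "Password!",
      "database.dbname": "testDB",
      "database.server.name": "server1",
      "table.whitelist": "dbo.customers",
      "column.blacklist": "dbo.customers.email,dbo.customers.phone_number",
      "snapshot.isolation.mode": "repeatable_read",
      "database.history.kafka.bootstrap.servers": "kafka:9092",
      "database.history.kafka.topic": "schema-changes.inventory"
    }
  }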

The connector allows for a much higher throughput now, thanks to caching of timestamps for the same LSN (DBZ-1078). Please refer to the change log for details on bugs fixed in this connector. A massive "Thank You" is in order for Grzegorz Kołakowski, for his tireless work on and testing of this connector!

New Embedded Engine Handler Interface

Debezium’s embedded engine now comes with a new interface ChangeConsumer, which event handlers can implement if they’d like to process change events in batches (DBZ-1080). That can result in substantial performance improvements when pushing change events to APIs that apply batch semantics themselves, such as the Kinesis Producer Library. You can learn more in the embedded engine docs.
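
To give an idea of what such a handler could look like, here is a minimal, hypothetical implementation; it assumes the 0.9 embedded engine API shape (EmbeddedEngine.ChangeConsumer with a handleBatch() method and a RecordCommitter for acknowledging records), so please check the embedded engine docs for the exact signatures:

  import java.util.List;

  import org.apache.kafka.connect.source.SourceRecord;

  import io.debezium.embedded.EmbeddedEngine;

  public class BatchingChangeConsumer implements EmbeddedEngine.ChangeConsumer {

      @Override
      public void handleBatch(List<SourceRecord> records, EmbeddedEngine.RecordCommitter committer)
              throws InterruptedException {
          for (SourceRecord record : records) {
              // hand the record over to a batch-oriented downstream API, e.g. buffer it
              // for the Kinesis Producer Library; this method is a placeholder, not Debezium API
              forwardToDownstreamApi(record);
              // mark the individual record as processed, so that its offset may be committed
              committer.markProcessed(record);
          }
          // signal that the whole batch has been handled
          committer.markBatchFinished();
      }

      private void forwardToDownstreamApi(SourceRecord record) {
          // stands in for the actual downstream producer call
      }
  }

Such a batch consumer is then passed to the embedded engine when building it, in place of a per-record handler.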

Misc. Changes and Bug Fixes

All the relational connectors now allow propagating the scale of numeric columns as a schema parameter (DBZ-1073). This is controlled via the column.propagate.source.type option and builds on the exposure of type name and width added in Debezium 0.8. All these schema parameters can be used when creating the schema of corresponding tables in sink databases.
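
For example, the following configuration fragment (with a purely illustrative, fully-qualified column name; the exact matching rules are described in the connector documentation) enables the propagation for a single column:

  {
    "column.propagate.source.type": "inventory.customers.email"
  }

The schema of the matched column then carries the original type name, length and scale as parameters, which sink connectors can consult when creating corresponding columns in the target database.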

Debezium’s container image for Apache Kafka now allows creating and watching topics (DBZ-1057). You can also specify a clean-up policy when creating a topic (DBZ-1038).

The Debezium MySQL connector now handles unsigned SMALLINT columns as expected (DBZ-1063). For nullable columns with a default value, NULL values are correctly exported (DBZ-1064; previously, the default value would have been exported in that case).

The Postgres connector handles tables without a primary key correctly now (DBZ-1029). We’ve also applied a fix to make sure that the connector works with Postgres on Amazon RDS, which was recently broken due to an update of wal2json in RDS (DBZ-1083). Going forward, we’re planning to set up CI jobs to test against Postgres on RDS in all the versions supported by the Debezium connector. This will help us to spot similar issues early on and react quickly.

Please see the change log for the complete list of all addressed issues.

This release wouldn’t have been possible without all the contributions by the following members of the Debezium community: Addison Higham, Amit Sela, Gagan Agrawal, Grzegorz Kołakowski, Ilia Bogdanov, Ivan Kovbas, Moira Tagle, Renato Mefi and Tony Rizko.

Thanks a lot!

Next Steps

The CR1 release took us a bit longer than anticipated. The release of Debezium 0.9.0.Final will therefore be moved to early February. Rather quickly thereafter we’re planning to release Debezium 0.9.1, which will provide improvements and potential bugfixes to the features added in 0.9.

For further plans beyond that, check out our road map. If you have any feedback or suggestions for future additions, please get in touch via the mailing list or in the comments below.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/02/05/debezium-0-9-0-final-released/index.html b/blog/2019/02/05/debezium-0-9-0-final-released/index.html index fd2062c326..c852597a8a 100644 --- a/blog/2019/02/05/debezium-0-9-0-final-released/index.html +++ b/blog/2019/02/05/debezium-0-9-0-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.0.Final Released

I’m delighted to announce the release of Debezium 0.9 Final!

This release only adds a small number of changes since last week’s CR1 release; most prominently, there are some more metrics for the SQL Server connector (lag behind the master, number of transactions, etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

Some key features of the release besides the aforementioned CDC connector for SQL Server are:

  • Initial snapshotting for the Oracle connector (which remains a "tech preview" at this point)

  • Brand-new metrics for the SQL Server and Oracle connectors and extended metrics for the MySQL connector

  • Field filtering and renaming for MongoDB

  • A new handler interface for the embedded engine

  • Lots of improvements around the "event flattening" SMT for MongoDB

  • More detailed source info in CDC events and optional metadata such as a column’s source type

  • Option to delay snapshots for a given time

  • Support for HSTORE columns in Postgres

  • Incubating support for picking up changes to the whitelist/blacklist configuration of the MySQL connector

As a teaser on the connector metrics support, here’s a screenshot of Java Mission Control displaying the SQL Server connector metrics:

Monitoring the Debezium SQL Server connector

The list above is far from being exhaustive; please take a look at the preview release announcements (Alpha1, Alpha2, Beta1, Beta2 and CR 1) as well as the full list of a whopping 176 fixed issues in JIRA.

It’s hard to say which of the changes and new features I’m most excited about, but one thing surely sticking out is the tremendous amount of community work on this release. No fewer than 34 different members of Debezium’s outstanding community have contributed to this release. A huge and massive "Thank You!" to all of you:

When upgrading from earlier Debezium releases, please make sure to read the information regarding update procedures and breaking changes in the release notes. One relevant change for users of the Debezium connector for MySQL is that our new Antlr-based DDL parser is now used by default. After lots of honing, we felt it was time to make the new parser the default. While the existing parser can still be used as a fallback as of Debezium 0.9, it will be phased out in 0.10.

Next Steps

After some drinks to celebrate this release, the plan is to do a 0.9.1 release rather quickly (probably in two weeks from now), providing improvements and potential bug fixes to the features and changes done in 0.9. We’ll also begin the work on Debezium 0.10, stay tuned for the details on that!

For further plans beyond that, take a look at our road map. Any suggestions and ideas are very welcome on the mailing list or in the comments below.

If you’re just about to begin using Debezium for streaming changes out of your database, you might be interested in joining us for the upcoming webinar on February 7th. After a quick overview, you’ll see Debezium in action, as it streams changes to a browser-based dashboard and more. You can also find lots of resources around Debezium and change data capture such as blog posts and presentations in our curated list of online resources.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.0.Final Released

I’m delighted to announce the release of Debezium 0.9 Final!

This release only adds a small number of changes since last week’s CR1 release; most prominently, there are some more metrics for the SQL Server connector (lag behind the master, number of transactions, etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

Some key features of the release besides the aforementioned CDC connector for SQL Server are:

  • Initial snapshotting for the Oracle connector (which remains a "tech preview" at this point)

  • Brand-new metrics for the SQL Server and Oracle connectors and extended metrics for the MySQL connector

  • Field filtering and renaming for MongoDB

  • A new handler interface for the embedded engine

  • Lots of improvements around the "event flattening" SMT for MongoDB

  • More detailed source info in CDC events and optional metadata such as a column’s source type

  • Option to delay snapshots for a given time

  • Support for HSTORE columns in Postgres

  • Incubating support for picking up changes to the whitelist/blacklist configuration of the MySQL connector

As a teaser on the connector metrics support, here’s a screenshot of Java Mission Control displaying the SQL Server connector metrics:

Monitoring the Debezium SQL Server connector

The list above is far from being exhaustive; please take a look at the preview release announcements (Alpha1, Alpha2, Beta1, Beta2 and CR 1) as well as the full list of a whopping 176 fixed issues in JIRA.

It’s hard to say which of the changes and new features I’m most excited about, but one thing surely sticking out is the tremendous amount of community work on this release. No fewer than 34 different members of Debezium’s outstanding community have contributed to this release. A huge and massive "Thank You!" to all of you:

When upgrading from earlier Debezium releases, please make sure to read the information regarding update procedures and breaking changes in the release notes. One relevant change for users of the Debezium connector for MySQL is that our new Antlr-based DDL parser is now used by default. After lots of honing, we felt it was time to make the new parser the default. While the existing parser can still be used as a fallback as of Debezium 0.9, it will be phased out in 0.10.
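
If you need to fall back to the old parser temporarily, this is controlled via a connector option; the option name and value below are shown for illustration only and should be verified against the MySQL connector documentation:

  {
    "ddl.parser.mode": "legacy"
  }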

Next Steps

After some drinks to celebrate this release, the plan is to do a 0.9.1 release rather quickly (probably in two weeks from now), providing improvements and potential bug fixes to the features and changes done in 0.9. We’ll also begin the work on Debezium 0.10, stay tuned for the details on that!

For further plans beyond that, take a look at our road map. Any suggestions and ideas are very welcome on the mailing list or in the comments below.

If you’re just about to begin using Debezium for streaming changes out of your database, you might be interested in joining us for the upcoming webinar on February 7th. After a quick overview, you’ll see Debezium in action, as it streams changes to a browser-based dashboard and more. You can also find lots of resources around Debezium and change data capture such as blog posts and presentations in our curated list of online resources.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/02/13/debezium-0-9-1-final-released/index.html b/blog/2019/02/13/debezium-0-9-1-final-released/index.html index 961bfd0c09..456f1bff14 100644 --- a/blog/2019/02/13/debezium-0-9-1-final-released/index.html +++ b/blog/2019/02/13/debezium-0-9-1-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.1.Final Released

Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

In terms of new features, there is a new container image provided on Docker Hub now: the debezium/tooling image contains a couple of open-source CLI tools (currently kafkacat, httpie, jq, mycli and pgcli) which greatly help when working with Debezium connectors, Apache Kafka and Kafka Connect on the command line (DBZ-1125). A big thank you to the respective authors of these fantastic tools!

CLI tools for working with Debezium

Altogether, 12 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

Thanks a lot to community members Ivan Lorenz and Tomaz Lemos Fernandes for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.1.Final Released

Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

In terms of new features, there is a new container image provided on Docker Hub now: the debezium/tooling image contains a couple of open-source CLI tools (currently kafkacat, httpie, jq, mycli and pgcli) which greatly help when working with Debezium connectors, Apache Kafka and Kafka Connect on the command line (DBZ-1125). A big thank you to the respective authors of these fantastic tools!

CLI tools for working with Debezium

Altogether, 12 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

Thanks a lot to community members Ivan Lorenz and Tomaz Lemos Fernandes for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/02/13/debezium-webinar-at-devnation-live/index.html b/blog/2019/02/13/debezium-webinar-at-devnation-live/index.html index 2390f424bf..ef4c75d224 100644 --- a/blog/2019/02/13/debezium-webinar-at-devnation-live/index.html +++ b/blog/2019/02/13/debezium-webinar-at-devnation-live/index.html @@ -1 +1 @@ - Debezium at DevNation Live

Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

The recording of that 30 min session is available on YouTube now. It also contains a demo that shows how to set up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins at 12 minutes 40 seconds into the recording.

Enjoy!

 
You can also find the slide deck (in a slightly extended version) on Speaker Deck:  
 

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium at DevNation Live

Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

The recording of that 30 min session is available on YouTube now. It also contains a demo that shows how to set up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins at 12 minutes 40 seconds into the recording.

Enjoy!

 
You can also find the slide deck (in a slightly extended version) on Speaker Deck:  
 

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/index.html b/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/index.html index 95fbe7ac3a..f2752e3e2a 100644 --- a/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/index.html +++ b/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/index.html @@ -254,4 +254,4 @@ public boolean alreadyProcessed(UUID eventId) { return entityManager.find(ConsumedMessage.class, eventId) != null; } -}

That way, should the transaction be rolled back for some reason, the original message will also not be marked as processed, and an exception will bubble up to the Kafka event consumer loop. This allows the message to be retried later on.

Note that a more complete implementation should take care of retrying a given message only for a certain number of times, before re-routing any unprocessable messages to a dead-letter queue or similar. There should also be some housekeeping on the message log table; periodically, all events older than the consumer’s current offset committed with the broker may be deleted, as it is ensured that such messages won’t be propagated to the consumer another time.

Summary

The outbox pattern is a great way for propagating data amongst different microservices.

By only modifying a single resource - the source service’s own database - it avoids any potential inconsistencies of altering multiple resources at the same time which don’t share one common transactional context (the database and Apache Kafka). By writing to the database first, the source service has instant "read your own writes" semantics, which is important for a consistent user experience, allowing query methods invoked following a write to instantly reflect any data changes.

At the same time, the pattern enables asynchronous event propagation to other microservices. Apache Kafka acts as a highly scalable and reliable backbone for the messaging amongst the services. Given the right topic retention settings, new consumers may come up long after an event has been originally produced, and build up their own local state based on the event history.

Putting Apache Kafka into the center of the overall architecture also ensures a decoupling of involved services. If for instance single components of the solution fail or are not available for some time, e.g. during an update, events will simply be processed later on: after a restart, the Debezium connector will continue to tail the outbox table from the point where it left off before. Similarly, any consumer will continue to process topics from its previous offset. By keeping track of already successfully processed messages, duplicates can be detected and excluded from repeated handling.

Naturally, such an event pipeline between different services is eventually consistent, i.e. consumers such as the shipping service may lag a bit behind producers such as the order service. Usually, that’s just fine, though, and can be handled in terms of the application’s business logic. For instance, there’ll typically be no need to create a shipment within the very same second as an order has been placed. Also, end-to-end delays of the overall solution are typically low (seconds or even sub-second range), thanks to log-based change data capture, which allows for emission of events in near-realtime.

One last thing to keep in mind is that the structure of the events exposed via the outbox should be considered a part of the emitting service’s API. I.e., when needed, their structure should be adjusted carefully and with compatibility considerations in mind. This ensures that consumers aren’t accidentally broken when upgrading the producing service. At the same time, consumers should be lenient when handling messages and, for instance, not fail when encountering unknown attributes within received events.

Many thanks to Hans-Peter Grahsl, Jiri Pechanec, Justin Holmes and René Kerner for their feedback while writing this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}

That way, should the transaction be rolled back for some reason, the original message will also not be marked as processed, and an exception will bubble up to the Kafka event consumer loop. This allows the message to be retried later on.

Note that a more complete implementation should take care of retrying a given message only for a certain number of times, before re-routing any unprocessable messages to a dead-letter queue or similar. There should also be some housekeeping on the message log table; periodically, all events older than the consumer’s current offset committed with the broker may be deleted, as it is ensured that such messages won’t be propagated to the consumer another time.
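
As a rough sketch of such housekeeping, a periodically executed job could remove old entries from the message log. The snippet below is hypothetical: it uses a simple time-based retention instead of the offset-based rule described above and assumes that the ConsumedMessage entity carries a timestamp attribute (here called timeOfReceipt), which may look different in your model:

  import java.time.Instant;
  import java.time.temporal.ChronoUnit;

  import javax.persistence.EntityManager;

  public class MessageLogHousekeeping {

      private final EntityManager entityManager;

      public MessageLogHousekeeping(EntityManager entityManager) {
          this.entityManager = entityManager;
      }

      // delete all message log entries older than the given retention period;
      // the retention should be chosen large enough that records this old have
      // long been committed by the consumer
      public void deleteOldConsumedMessages(long retentionHours) {
          Instant cutoff = Instant.now().minus(retentionHours, ChronoUnit.HOURS);
          entityManager.createQuery(
                  "delete from ConsumedMessage c where c.timeOfReceipt < :cutoff")
              .setParameter("cutoff", cutoff)
              .executeUpdate();
      }
  }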

Summary

The outbox pattern is a great way for propagating data amongst different microservices.

By only modifying a single resource - the source service’s own database - it avoids any potential inconsistencies of altering multiple resources at the same time which don’t share one common transactional context (the database and Apache Kafka). By writing to the database first, the source service has instant "read your own writes" semantics, which is important for a consistent user experience, allowing query methods invoked following a write to instantly reflect any data changes.

At the same time, the pattern enables asynchronous event propagation to other microservices. Apache Kafka acts as a highly scalable and reliable backbone for the messaging amongst the services. Given the right topic retention settings, new consumers may come up long after an event has been originally produced, and build up their own local state based on the event history.

Putting Apache Kafka into the center of the overall architecture also ensures a decoupling of involved services. If for instance single components of the solution fail or are not available for some time, e.g. during an update, events will simply be processed later on: after a restart, the Debezium connector will continue to tail the outbox table from the point where it left off before. Similarly, any consumer will continue to process topics from its previous offset. By keeping track of already successfully processed messages, duplicates can be detected and excluded from repeated handling.

Naturally, such an event pipeline between different services is eventually consistent, i.e. consumers such as the shipping service may lag a bit behind producers such as the order service. Usually, that’s just fine, though, and can be handled in terms of the application’s business logic. For instance, there’ll typically be no need to create a shipment within the very same second as an order has been placed. Also, end-to-end delays of the overall solution are typically low (seconds or even sub-second range), thanks to log-based change data capture, which allows for emission of events in near-realtime.

One last thing to keep in mind is that the structure of the events exposed via the outbox should be considered a part of the emitting service’s API. I.e., when needed, their structure should be adjusted carefully and with compatibility considerations in mind. This ensures that consumers aren’t accidentally broken when upgrading the producing service. At the same time, consumers should be lenient when handling messages and, for instance, not fail when encountering unknown attributes within received events.

Many thanks to Hans-Peter Grahsl, Jiri Pechanec, Justin Holmes and René Kerner for their feedback while writing this post!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/02/25/debezium-0-9-2-final-released/index.html b/blog/2019/02/25/debezium-0-9-2-final-released/index.html index b4cf72e069..f000889793 100644 --- a/blog/2019/02/25/debezium-0-9-2-final-released/index.html +++ b/blog/2019/02/25/debezium-0-9-2-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.2.Final Released

The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

A couple of fixes relate to the Debezium Postgres connector:

  • When not using REPLICA IDENTITY FULL, certain data types could trigger exceptions for update or delete events; those are fixed now (DBZ-1141, DBZ-1149)

  • The connector won’t fail any longer when encountering a change to a row with an unaltered TOAST column value (DBZ-1146)

Also the Debezium MySQL connector saw a number of fixes:

  • The connector works correctly now when using GTIDs and ANSI_QUOTES SQL mode (DBZ-1147)

  • The new Antlr-based DDL parsers can handle column names that are keywords such as MEDIUM (DBZ-1150)

  • TIME columns with a default value larger than 23:59:59 can be exported now (DBZ-1137)

Another important fix was done in the Debezium connector for SQL Server, where the connector archive deployed to Maven Central accidentally contained all test-scoped and provided-scoped dependencies. This has been resolved now, so the connector archive only contains the actually needed JARs and thus is much smaller (DBZ-1138).

New Features

The 0.9.2 release also comes with two small new features:

  • You can pass arbitrary parameters to the logical decoding plug-in used by the Postgres connector; this can for instance be used with wal2json to limit the number of tables to capture on the server side (DBZ-1130)

  • The MongoDB connector now has the long-awaited snapshotting mode NEVER (DBZ-867), i.e. you can set up a new connector without taking an initial snapshot and instantly begin streaming changes from the oplog

Version Updates

As of this release, Debezium has been upgraded to Apache Kafka 2.1.1. Among other things, this release fixes an issue where the Kafka Connect REST API would expose connector credentials also when those were configured via secrets (KAFKA-5117). We’ve also upgraded the binlog client used by the MySQL connector to version 0.19.0 (DBZ-1140), which fixes a bug that had caused exceptions during rebalancing of the connector before (DBZ-1132).

Check out the release notes for the complete list of issues fixed in Debezium 0.9.2.

Many thanks to Debezium community members Andrey Pustovetov, Keith Barber, Krizhan Mariampillai and Taylor Rolison for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.9.2.Final Released

The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

A couple of fixes relate to the Debezium Postgres connector:

  • When not using REPLICA IDENTITY FULL, certain data types could trigger exceptions for update or delete events; those are fixed now (DBZ-1141, DBZ-1149)

  • The connector won’t fail any longer when encountering a change to a row with an unaltered TOAST column value (DBZ-1146)

Also the Debezium MySQL connector saw a number of fixes:

  • The connector works correctly now when using GTIDs and ANSI_QUOTES SQL mode (DBZ-1147)

  • The new Antlr-based DDL parsers can handle column names that are keywords such as MEDIUM (DBZ-1150)

  • TIME columns with a default value larger than 23:59:59 can be exported now (DBZ-1137)

Another important fix was done in the Debezium connector for SQL Server, where the connector archive deployed to Maven Central accidentally contained all test-scoped and provided-scoped dependencies. This has been resolved now, so the connector archive only contains the actually needed JARs and thus is much smaller (DBZ-1138).

New Features

The 0.9.2 release also comes with two small new features:

  • You can pass arbitrary parameters to the logical decoding plug-in used by the Postgres connector; this can for instance be used with wal2json to limit the number of tables to capture on the server side (DBZ-1130); a configuration sketch follows below the list

  • The MongoDB connector now has the long-awaited snapshotting mode NEVER (DBZ-867), i.e. you can set up a new connector without taking an initial snapshot and instantly begin streaming changes from the oplog
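
As a sketch of the first of these features, the following hypothetical Postgres connector registration passes wal2json’s add-tables parameter through to the server side; all connection values are placeholders, and the slot.stream.params option name should be double-checked against the Postgres connector documentation before use:

  {
    "name": "inventory-connector",
    "config": {
      "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
      "plugin.name": "wal2json",
      "database.hostname": "postgres",
      "database.port": "5432",
      "database.user": "postgres",
      "database.password": "postgres",
      "database.dbname": "inventory",
      "database.server.name": "dbserver1",
      "slot.stream.params": "add-tables=public.customers,public.orders"
    }
  }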

Version Updates

As of this release, Debezium has been upgraded to Apache Kafka 2.1.1. Among other things, this release fixes an issue where the Kafka Connect REST API would expose connector credentials also when those were configured via secrets (KAFKA-5117). We’ve also upgraded the binlog client used by the MySQL connector to version 0.19.0 (DBZ-1140), which fixes a bug that had caused exceptions during rebalancing of the connector before (DBZ-1132).

Check out the release notes for the complete list of issues fixed in Debezium 0.9.2.

Many thanks to Debezium community members Andrey Pustovetov, Keith Barber, Krizhan Mariampillai and Taylor Rolison for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/03/14/debezium-meets-quarkus/index.html b/blog/2019/03/14/debezium-meets-quarkus/index.html index 0a3f46a208..a2f79deaf3 100644 --- a/blog/2019/03/14/debezium-meets-quarkus/index.html +++ b/blog/2019/03/14/debezium-meets-quarkus/index.html @@ -61,4 +61,4 @@ smallrye.messaging.source.orders.bootstrap.servers=kafka:9092 smallrye.messaging.source.orders.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer smallrye.messaging.source.orders.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer -smallrye.messaging.source.orders.group.id=shipment-service-quarkus

In our case it contains the Kafka bootstrap server, the key and value deserializers, and the consumer group id for the orders channel, as shown above.

Execution

The Docker Compose config file has been enriched with two services, MariaDB and the new Quarkus-based shipment service. So when docker-compose up is executed, two shipment services are started side-by-side: the original Thorntail-based one and the new one using Quarkus. When the order service receives a new purchase order and exports a corresponding event to Apache Kafka via the outbox table, that message is processed by both shipment services, as they are using distinct consumer group ids.

Performance Numbers

The numbers are definitely not scientific, but provide a good indication of the order-of-magnitude difference between the native Quarkus-based application and the Thorntail service running on the JVM:

                                  Quarkus service   Thorntail service
  memory [MB]                     33.8              1257
  start time [ms]                 260               5746
  application package size [MB]   54                131

The memory data were obtained via the htop utility. The startup time was measured until the message about application readiness was printed. As with all performance measurements, you should run your own comparisons based on your set-up and workload to gain insight into the actual differences for your specific use cases.

Summary

In this post we have successfully demonstrated that it is possible to consume Debezium-generated events in a Java application written with the Quarkus Java stack. We have also shown that it is possible to provide such an application as a native binary image, and we provided back-of-the-envelope performance numbers demonstrating significant savings in resources.

If you’d like to see the awesomeness of deploying Java microservices as native images for yourself, you can find the complete source code of the implementation in the Debezium examples repo. If you have any questions or feedback, please let us know in the comments below; we’re looking forward to hearing from you!

Many thanks to Guillaume Smet for reviewing an earlier version of this post!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   



\ No newline at end of file +smallrye.messaging.source.orders.group.id=shipment-service-quarkus

\ No newline at end of file diff --git a/blog/2019/03/26/debezium-0-9-3-final-released/index.html b/blog/2019/03/26/debezium-0-9-3-final-released/index.html index 7890976c5e..390b4cf1d8 100644 --- a/blog/2019/03/26/debezium-0-9-3-final-released/index.html +++ b/blog/2019/03/26/debezium-0-9-3-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.3.Final Released

The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

Container images will be released with a small delay due to some Docker Hub configuration issues.

New Features

The 0.9.3 release comes with two larger new features:

  • A feature request was made to execute a partial recovery of the replication process after losing the replication slot with the PostgreSQL database, e.g. after failing over to a secondary database host (DBZ-1082). Instead of adding yet another snapshotting mode, we took a step back and decided to make the Postgres snapshotting process more customizable by introducing a service provider interface (SPI). This lets you implement and register your own Java class for controlling the snapshotting process. See the issue description of DBZ-1082 for one possible custom implementation of this SPI, which is based on Postgres' catalog_xmin property and selects all records altered after the last known xmin position. To learn more about the SPI, see the Snapshotter contract. Note that the feature is still in an incubating phase and the SPI should be considered unstable for the time being.

  • Not long ago we published a blog post about implementing the outbox pattern with Debezium for propagating data changes between microservices. Community member Renato Mefi expanded the idea and created a ready-made implementation of the single message transform (SMT) described in the post for routing events from the outbox table to specific topics. This SMT is part of the Debezium core library now (DBZ-1169). Its usage will be described in the documentation soon; for the time being please refer to the EventRouter type and the accompanying configuration class, and see the registration sketch after this list.
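
As a minimal sketch, assuming a connector configured through a Kafka Connect properties file, registering the new SMT boils down to lines like the following; the transform alias is arbitrary, and the routing options themselves are described by the configuration class mentioned above:

# illustrative SMT registration in a connector properties file
transforms=outbox
transforms.outbox.type=io.debezium.transforms.outbox.EventRouter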

Bug fixes

We did a couple of fixes related to the Debezium Postgres connector:

  • A regression that introduced a deadlock in the snapshotting process has been fixed (DBZ-1161)

  • The hstore datatype works correctly in the snapshot phase (DBZ-1162)

  • The wal2json plug-in now also processes empty events (DBZ-1181), e.g. those originating from materialized view updates; this should help resolve some of the issues where WAL files in Postgres couldn’t be discarded due to Debezium’s replication slot not advancing.

  • The commit time is properly converted to microseconds (DBZ-1174)

Also the Debezium MySQL connector saw a number of fixes, especially in the SQL parser:

  • The SERIAL datatype and its default value are now supported (DBZ-1185)

  • A specific detail of the MySQL grammar that allows table options in ALTER TABLE to be enumerated without commas is now handled correctly (DBZ-1186)

  • A false alarm for an empty MySQL password is no longer reported (DBZ-1188)

  • It is no longer necessary to create the database history topic manually for brokers without a default topic replication factor (DBZ-1179)

It is now possible to process multiple schemas with a single Oracle connector (DBZ-1166).

Check out the release notes for the complete list of issues fixed in Debezium 0.9.3.

Many thanks to Debezium community members Renato Mefi, Shubham Rawat, Addison Higham, Jon Casstevens, Ashar Hassan and Josh Stanfield for their contributions to this release!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   



\ No newline at end of file + Debezium 0.9.3.Final Released

\ No newline at end of file diff --git a/blog/2019/04/11/debezium-0-9-4-final-released/index.html b/blog/2019/04/11/debezium-0-9-4-final-released/index.html index 90c25fa05b..9eb73cbfea 100644 --- a/blog/2019/04/11/debezium-0-9-4-final-released/index.html +++ b/blog/2019/04/11/debezium-0-9-4-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.4.Final Released

It’s my pleasure to announce the release of Debezium 0.9.4.Final!

This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

MySQL Connector Improvements

The Debezium connector for MySQL comes with two new metrics:

  • Whether GTID is enabled for offset tracking or not (DBZ-1221)

  • Number of filtered events (DBZ-1206)

It also now supports database connections using TLS 1.2 (DBZ-1208).

New Postgres Datatypes

The Postgres connector now allows capturing changes to columns of the CIDR and INET types (DBZ-1189).

Bug Fixes

The fixed bugs include the following:

  • Closing the database connection after snapshotting (DBZ-1218)

  • ALTER statements affecting enum columns with character set options can now be parsed (DBZ-1203)

  • Avoiding a timeout after bootstrapping a new table (DBZ-1207)

Check out the release notes for the complete list of issues fixed in Debezium 0.9.4.

Many thanks to Debezium community members Andrey Pustovetov, Jordan Bragg, Joy Gao, Preethi Sadagopan, Renato Mefi, Sasha Kovryga, Shubham Rawat and Stephen Powis for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   



\ No newline at end of file + Debezium 0.9.4.Final Released

\ No newline at end of file diff --git a/blog/2019/04/18/hello-debezium/index.html b/blog/2019/04/18/hello-debezium/index.html index 4d04d8d66f..cef65e7fbc 100644 --- a/blog/2019/04/18/hello-debezium/index.html +++ b/blog/2019/04/18/hello-debezium/index.html @@ -1 +1 @@ - Debezium's Team Grows

Hello everyone, my name is Chris Cranford and I recently joined the Debezium team.

My journey at Red Hat began just over three years ago; however, I have been in this line of work for nearly twenty years. Throughout my career, I have advocated for and supported open source software. Many of my initial software endeavors were based on open source software, several of which are still heavily used today, such as Hibernate ORM.

When I first joined Red Hat, I had the pleasure to work on the Hibernate ORM team. I had been an end user of the project since 2.0, so it was an excellent fit to be able to contribute full time to a project that had served me well in the corporate world n-times over.

It wasn’t long ago that @gunnarmorling and I had a brief exchange about Debezium. I had not heard of the project and I was super stoked because I immediately saw parallels between its goals and Hibernate Envers, a change data capture solution based on Hibernate’s event framework that I was maintaining at the time.

I believe one of my first "wow" moments was when I realized how well Debezium fits into the microservices world. The idea of being able to share data between microservices in a very decoupled way is a massive win for building reusable components and minimizing technical debt.

Debezium just felt like the next logical step. There are so many new and exciting things to come, and the team and I cannot wait to share them.

So let’s get started!

--Chris

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   



\ No newline at end of file + Debezium's Team Grows

\ No newline at end of file diff --git a/blog/2019/05/06/debezium-0-9-5-final-released/index.html b/blog/2019/05/06/debezium-0-9-5-final-released/index.html index d65c11ff69..1b749762cc 100644 --- a/blog/2019/05/06/debezium-0-9-5-final-released/index.html +++ b/blog/2019/05/06/debezium-0-9-5-final-released/index.html @@ -1 +1 @@ - Debezium 0.9.5.Final Released

It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

Apache Kafka Update and New Features

This release has been built against and tested with Apache Kafka 2.2.0 (DBZ-1227). Earlier versions continue to be supported as well.

For all connectors it is now possible to specify the batch size used when taking snapshots (DBZ-1247). The new connector option snapshot.fetch.size has been introduced for that purpose. This option replaces the earlier option rows.fetch.size, which existed in some of the connectors and which will be removed in Debezium 0.10. Existing connector instances should therefore be re-configured to use the new option, as sketched below.
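
A hedged sketch of the new option in a connector properties file; the connector name, class and the chosen batch size are illustrative values only:

# illustrative connector properties excerpt
name=inventory-connector
connector.class=io.debezium.connector.mysql.MySqlConnector
snapshot.fetch.size=2000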

Continuing the work from Debezium 0.9.4, the Postgres connector supports some more column types: MACADDR and MACADDR8 (DBZ-1193) as well as INT4RANGE, INT8RANGE and NUMRANGE (DBZ-1076).

Fixes

Amongst others, this release includes the following fixes:

  • Failing to specify value for database.server.name results in invalid Kafka topic name (DBZ-212)

  • Postgres Connector times out in schema discovery for DBs with many tables (DBZ-1214)

  • Oracle connector: JDBC transaction can only capture single DML record (DBZ-1223)

  • Lost precision for timestamp with timezone (DBZ-1236)

  • NullPointerException due to optional value for commitTime (DBZ-1241)

  • Default value for datetime(0) is incorrectly handled (DBZ-1243)

  • Microsecond precision is lost when reading timetz data from Postgres (DBZ-1260)

Please refer to the release notes for the complete list of issues fixed in Debezium 0.9.5.

We’re very thankful to the following community members who contributed to this release: Addison Higham, Andrey Pustovetov, Jork Zijlstra, Krizhan Mariampillai, Mathieu Rozieres and Shubham Rawat.

Outlook

This release is planned to be the last in the 0.9 line.

We’re now going to focus on Debezium 0.10, whose main topic will be to clean up a few things: we’d like to remove a few deprecated options and features (e.g. the legacy DDL parser in the MySQL connector). We’re also planning to do a thorough review of the event structure of the different connectors; for instance, in the source block of CDC messages there are some field names that should be unified. We believe users will benefit from a more consistent experience across the connectors.

Another focus area will be to migrate the existing Postgres connector to the framework classes established for the SQL Server and Oracle connectors. This will allow us to expose some new features for the Postgres connector, e.g. the monitoring capabilities already rolled out for the other two connectors.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   



\ No newline at end of file + Debezium 0.9.5.Final Released

\ No newline at end of file diff --git a/blog/2019/05/23/tutorial-using-debezium-connectors-with-apache-pulsar/index.html b/blog/2019/05/23/tutorial-using-debezium-connectors-with-apache-pulsar/index.html index f217bbd7b0..bbbc35cff5 100644 --- a/blog/2019/05/23/tutorial-using-debezium-connectors-with-apache-pulsar/index.html +++ b/blog/2019/05/23/tutorial-using-debezium-connectors-with-apache-pulsar/index.html @@ -52,4 +52,4 @@ $ docker kill 84d66c2f591d

To delete Pulsar data, delete the data directory in the Pulsar binary directory.

$ pwd
 /Users/jia/ws/releases/apache-pulsar-2.3.0
 
-$ rm -rf data

Conclusion

The Pulsar IO framework allows running the Debezium connectors for change data capture, streaming data changes from different databases into Apache Pulsar. In this tutorial you’ve learned how to capture data changes in a MySQL database and propagate them to Pulsar. We are continuously improving support for running the Debezium connectors with Apache Pulsar; it will become much easier to use with the Pulsar 2.4.0 release.

Jia Zhai

Jia is a core software engineer at StreamNative, as well as PMC member of both Apache BookKeeper and Apache Pulsar, and contributes to these 2 projects continually. He lives in Beijing, China.

   



\ No newline at end of file +$ rm -rf data

\ No newline at end of file diff --git a/blog/2019/05/29/debezium-0-10-0-alpha1-released/index.html b/blog/2019/05/29/debezium-0-10-0-alpha1-released/index.html index 3b8f38d003..c55eb0e59c 100644 --- a/blog/2019/05/29/debezium-0-10-0-alpha1-released/index.html +++ b/blog/2019/05/29/debezium-0-10-0-alpha1-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Alpha1 "Spring Clean-Up" Edition Released

I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

Why?

First of all, let’s discuss a bit why we’re doing these changes.

Over the last three years, Debezium has grown from supporting just a single database into an entire family of CDC connectors for a range of different relational databases and MongoDB, as well as accompanying components such as message transformations for topic routing or implementing the outbox pattern.

As in any mature project, over time we figured that a few things should be done differently in the code base than we had thought at first. For instance we moved from a hand-written parser for processing MySQL DDL statements to a much more robust implementation based on Antlr. Also we realized the way certain temporal column types were exported was at risk of value overflow in certain conditions, so we added a new mode not prone to these issues. As a last example, we made options like the batch size used during snapshotting consistent across the different connectors.

Luckily, Debezium quickly gained traction and despite the 0.x version number, it is used heavily in production at a large number of organizations, and users rely on its stability. So whenever we did such changes, we aimed at making the upgrade experience as smooth as possible; usually that means that the previous behavior is still available but is marked as deprecated in the documentation, while a new improved option, implementation etc. is added and made the default behavior.

At the same time we realized that there are a couple of differences between the connectors which shouldn’t really be there. Specifically, the source block of change events has some differences which make a uniform handling by consumers more complex than it should be; for instance the timestamp field is named "ts_sec" in MySQL events but "ts_usec" for Postgres.

With all this in mind, we decided that it is about time to clean up these issues. This is done for a couple of purposes:

  • Keeping the code base maintainable and open for future development by removing legacy code such as deprecated options and their handling as well as the legacy MySQL DDL parser

  • Making CDC events from different connectors easier to consume by unifying the source block created by the different connectors as far as possible

  • Preparing the project to go to version 1.0 with an even stronger promise of retaining backwards compatibility than already practiced today

What?

Now as we have discussed why we feel it’s time for some "clean-up", let’s take a closer look at the most relevant changes. Please also refer to the "breaking changes" section of the migration notes for more details.

  • The legacy DDL parser for MySQL has been removed (DBZ-736); if you are not using the Antlr-based one yet (it was introduced in 0.8 and became the default in 0.9), it’s highly recommended that you test it with your databases. Should you run into any parsing errors, please report them so we can fix them for the 0.10 Final release.

  • The SMTs for retrieving the new record/document state from change events have been renamed from io.debezium.transforms.UnwrapFromEnvelope and io.debezium.connector.mongodb.transforms.UnwrapFromMongoDbEnvelope into ExtractNewRecordState and ExtractNewDocumentState, respectively (DBZ-677). The old names can still be used as of 0.10, but doing so will raise a warning. They are planned for removal in Debezium 0.11.

  • Several connector options that were deprecated in earlier Debezium versions have been removed (DBZ-1234): the drop.deletes option of the new record/document state extraction SMTs (superseded by the delete.handling.mode option), the rows.fetch.size option (superseded by snapshot.fetch.size), the adaptive value of the time.precision.mode option for MySQL (prone to value loss, use adaptive_microseconds instead) and the snapshot.minimal.locks option for the MySQL connector (superseded by snapshot.locking.mode)

  • Several option names of the (incubating) SMT for the outbox pattern have been renamed for the sake of consistency (DBZ-1289)

  • Several fields within the source block of CDC events have been renamed for the sake of consistency (DBZ-596); as this is technically a backwards-incompatible change when using Avro and the schema registry, we’ve added a connector option source.struct.version which, when set to the value v1, will have connectors produce the previous source structure. v2 is the default and any consumers should be adjusted to work with the new source structure as soon as possible. A small configuration sketch follows this list.
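
A hedged sketch of the configuration lines relating to the renamed SMT and the new compatibility option; the transform alias is arbitrary, and v1 is only needed while consumers still expect the old source layout:

# illustrative connector properties excerpt
transforms=unwrap
transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
source.struct.version=v1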

New Features and Bugfixes

Besides these changes, the 0.10.0.Alpha1 release also contains some feature additions and bug fixes:

  • The SQL Server connector supports custom SELECT statements for snapshotting (DBZ-1224); a configuration sketch follows this list

  • database, schema and table/collection names have been added consistently to the source block for CDC events from all connectors (DBZ-875)

  • Client authentication works for the MySQL connector (DBZ-1228)

  • The embedded engine no longer duplicates events after restarts (DBZ-1276)

  • A parser bug related to CREATE INDEX statements was fixed (DBZ-1264)
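
For the custom snapshot SELECT statements mentioned in the first item above, the configuration could look roughly like the following; the table name and the filter are made-up examples:

# illustrative SQL Server connector properties excerpt
snapshot.select.statement.overrides=dbo.orders
snapshot.select.statement.overrides.dbo.orders=SELECT * FROM dbo.orders WHERE status <> 'ARCHIVED'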

Overall, 30 issues were addressed in this release. Many thanks to Arkoprabho Chakraborti, Ram Satish and Yuchao Wang for their contributions to this release!

Speaking of contributors, we did some housekeeping on the list of everyone who has ever contributed to Debezium, too. Exactly 111 individuals have contributed code up to this point, which is just phenomenal! Thank you so much everyone, you folks rock!

Outlook

Going forward, there are some more details we’d like to unify across the different connectors before going to Debezium 0.10 Final. For instance the source attribute snapshot will be changed so it can take one of three states: true, false or last (indicating that this event is the last one created during initial snapshotting).

We’ll also continue our efforts to migrate the existing Postgres connector to the framework classes established for the SQL Server and Oracle connectors. Another thing we’re actively exploring is how the Postgres connector could take advantage of the "logical replication" feature added in Postgres 10. This may provide us with a way to ingest change events without requiring a custom server-side logical decoding plug-in, which proves challenging in cloud environments where there’s typically just a limited set of logical decoding options available.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   



\ No newline at end of file + Debezium 0.10.0.Alpha1 "Spring Clean-Up" Edition Released

\ No newline at end of file diff --git a/blog/2019/06/03/debezium-0-10-0-alpha2-released/index.html b/blog/2019/06/03/debezium-0-10-0-alpha2-released/index.html index daaee7512b..30f674f945 100644 --- a/blog/2019/06/03/debezium-0-10-0-alpha2-released/index.html +++ b/blog/2019/06/03/debezium-0-10-0-alpha2-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Alpha2 Released

Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now indicate which record is the last one of the snapshot phase, so that downstream consumers can react to it.
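
To illustrate, the source block of the very last record emitted during the snapshot carries the new marker roughly like this (the surrounding fields are an abbreviated example rather than the exact schema):

{
    "source": {
        "version": "0.10.0.Alpha2",
        "connector": "mysql",
        "name": "dbserver1",
        "snapshot": "last",
        "db": "inventory",
        "table": "customers"
    }
}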

Apache ZooKeeper was upgraded to version 3.4.14 to fix a security vulnerability (CVE-2019-0201).

Our regular contributor Renato dived deeply into our image build scripts and enriched (DBZ-1279) them with a Dockerfile linter.

Schema change events include the table name(s) in the metadata describing which tables are affected by the change (DBZ-871).

Bartosz Miedlar has fixed a bug in the MySQL ANTLR grammar that caused issues with identifiers in backquotes (DBZ-1300).

What’s next?

We hope we will be able to keep up the recent release cadence and get out the first beta version of 0.10 in two weeks.

Stay tuned for more!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.Alpha2 Released

Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now indicate which record is the last one of the snapshot phase, so that downstream consumers can react to it.

Apache ZooKeeper was upgraded to version 3.4.14 to fix a security vulnerability (CVE-2019-0201).

Our regular contributor Renato dived deeply into our image build scripts and enriched (DBZ-1279) them with a Dockerfile linter.

Schema change events include the table name(s) in the metadata describing which tables are affected by the change (DBZ-871).

Bartosz Miedlar has fixed a bug in the MySQL ANTLR grammar that caused issues with identifiers in backquotes (DBZ-1300).

What’s next?

We hope we will be able to keep up the recent release cadence and get out the first beta version of 0.10 in two weeks.

Stay tuned for more!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/06/05/debezium-newsletter-01-2019/index.html b/blog/2019/06/05/debezium-newsletter-01-2019/index.html index 145b62737a..b4dc88456d 100644 --- a/blog/2019/06/05/debezium-newsletter-01-2019/index.html +++ b/blog/2019/06/05/debezium-newsletter-01-2019/index.html @@ -1 +1 @@ - Debezium's Newsletter 01/2019

Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

Articles

Gunnar Morling recently attended Kafka Summit in London where he gave a talk on Change Data Streaming Patterns for Microservices With Debezium. You can watch the full presentation here.

Strimzi provides an easy way to run Apache Kafka on Kubernetes or OpenShift. This article by Sincy Sebastian shows just how simple it is to replicate change events from MySQL to Elasticsearch using Debezium.

Debezium allows replicating data between heterogeneous data stores with ease. This article by Matthew Groves explains how you can replicate data from MySQL to Couchbase.

As the size of data that systems maintain continues to grow, this begins to impact how we capture, compute, and report real-time analytics. This article by Maria Patterson explains how you can use Debezium to stream data from Postgres, perform analytical calculations using KSQL, and then stream those results back to Postgres for consumption.

In a recent article published in Portuguese, Paulo Singaretti illustrates how they use Debezium and Kafka to stream changes from their relational database and then store the change stream results in Google Cloud Services.

This recent blog by Jia Zhai provides a complete tutorial showing how to use Debezium connectors with Apache Pulsar.

Time to upgrade

Debezium version 0.9.5 was just released. If you are using the 0.9 branch you should definitely check out 0.9.5. For details on the bug fixes as well as the enhancements this version includes, check out the release notes.

The Debezium team has also begun active development on the next major version, 0.10. We recently published a blog that provides an overview of what 0.10 is meant to deliver. If you want details on the bug fixes and enhancements we’ve packed into this release, you can view the issue list.

Feedback

We intend to publish new editions of this newsletter periodically. Should anyone have suggestions for changes or for what could be highlighted here, we welcome that feedback. You can reach out to us via any of our community channels found here.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium's Newsletter 01/2019

Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

Articles

Gunnar Morling recently attended Kafka Summit in London where he gave a talk on Change Data Streaming Patterns for Microservices With Debezium. You can watch the full presentation here.

Strimzi provides an easy way to run Apache Kafka on Kubernetes or OpenShift. This article by Sincy Sebastian shows just how simple it is to replicate change events from MySQL to Elasticsearch using Debezium.

Debezium allows replicating data between heterogeneous data stores with ease. This article by Matthew Groves explains how you can replicate data from MySQL to Couchbase.

As the size of data that systems maintain continues to grow, this begins to impact how we capture, compute, and report real-time analytics. This article by Maria Patterson explains how you can use Debezium to stream data from Postgres, perform analytical calculations using KSQL, and then stream those results back to Postgres for consumption.

In a recent article published in Portuguese, Paulo Singaretti illustrates how they use Debezium and Kafka to stream changes from their relational database and then store the change stream results in Google Cloud Services.

This recent blog by Jia Zhai provides a complete tutorial showing how to use Debezium connectors with Apache Pulsar.

Time to upgrade

Debezium version 0.9.5 was just released. If you are using the 0.9 branch you should definitely check out 0.9.5. For details on the bug fixes as well as the enhancements this version includes, check out the release notes.

The Debezium team has also begun active development on the next major version, 0.10. We recently published a blog that provides an overview of what 0.10 is meant to deliver. If you want details on the bug fixes and enhancements we’ve packed into this release, you can view the issue list.

Feedback

We intend to publish new editions of this newsletter periodically. Should anyone have suggestions for changes or for what could be highlighted here, we welcome that feedback. You can reach out to us via any of our community channels found here.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/06/12/debezium-0-10-0-beta1-released/index.html b/blog/2019/06/12/debezium-0-10-0-beta1-released/index.html index 118d3fe8a8..289c447b4e 100644 --- a/blog/2019/06/12/debezium-0-10-0-beta1-released/index.html +++ b/blog/2019/06/12/debezium-0-10-0-beta1-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Beta1 Released

Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if not a single table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.
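
As an illustration, a filter configuration like the following sketch would capture nothing at all, because the table whitelist contains a typo ("customres") and thus matches no existing table; with DBZ-1242 the connector now logs a warning in this situation instead of silently producing no events (connector class and names are just examples):

{
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.whitelist": "inventory",
    "table.whitelist": "inventory.customres"
}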

Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.Beta1 Released

Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if not a single table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/06/19/debezium-wears-fedora/index.html b/blog/2019/06/19/debezium-wears-fedora/index.html index 134f864c91..25c6260df3 100644 --- a/blog/2019/06/19/debezium-wears-fedora/index.html +++ b/blog/2019/06/19/debezium-wears-fedora/index.html @@ -67,4 +67,4 @@ #wal_sender_timeout = 60s # in milliseconds; 0 disables max_replication_slots = 4 # max number of replication slots (change requires restart)

Configure the security file /var/lib/pgsql/data/pg_hba.conf for the database user that will be used by Debezium (e.g. debezium) by adding these parameters:

local   replication     debezium                          trust
 host    replication     debezium  127.0.0.1/32            trust
-host    replication     debezium  ::1/128                 trust

Finally, restart PostgreSQL:

$ sudo systemctl restart postgresql

And that’s it: now we have a PostgreSQL database that is ready to stream changes to the Debezium PostgreSQL connector. Of course, the plug-in can also be installed into an already existing database (Postgres versions 9 and later), simply by installing the RPM package and setting up the config and security files in the way described.
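
From here, assuming a Kafka Connect cluster with the Debezium Postgres connector is already running (for example the one from our tutorial), registering a connector against this database could look roughly like the following sketch; the connector name, host and credentials are placeholders:

$ curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors -d '{
    "name": "fedora-pg-connector",
    "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "database.hostname": "localhost",
        "database.port": "5432",
        "database.user": "debezium",
        "database.dbname": "postgres",
        "database.server.name": "fedorapg",
        "plugin.name": "decoderbufs"
    }
}'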

Outlook: pgoutput

While the decoderbufs plug-in is our recommended choice for a logical decoding plug-in, there are cases where you may not be able to use it. Most specifically, you typically don’t have the flexibility to install custom plug-ins in cloud-based environments such as Amazon RDS.

This is why we’re exploring a third alternative to decoderbufs and wal2json right now, which is to leverage the Postgres logical replication mechanism. There’s a built-in plug-in based on this, pgoutput, which exists in every Postgres database since version 10. We’re still in the process of exploring the implications (and possible limitations) of using pgoutput, but so far things look promising and it may eventually be a valuable tool to have in the box.
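
If that exploration works out, choosing the plug-in should presumably feel just like selecting decoderbufs or wal2json today, i.e. via the connector’s plugin.name option; the excerpt below is only a sketch of that idea, not a finalized configuration:

{
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "plugin.name": "pgoutput"
}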

Stay tuned for more details coming soon!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +host replication debezium ::1/128 trust

Finally, restart PostgreSQL:

$ sudo systemctl restart postgresql

And that’s it: now we have a PostgreSQL database that is ready to stream changes to the Debezium PostgreSQL connector. Of course, the plug-in can also be installed into an already existing database (Postgres versions 9 and later), simply by installing the RPM package and setting up the config and security files in the way described.

Outlook: pgoutput

While the decoderbufs plug-in is our recommended choice for a logical decoding plug-in, there are cases where you may not be able to use it. Most specifically, you typically don’t have the flexibility to install custom plug-ins in cloud-based environments such as Amazon RDS.

This is why we’re exploring a third alternative to decoderbufs and wal2json right now, which is to leverage the Postgres logical replication mechanism. There’s a built-in plug-in based on this, pgoutput, which exists in every Postgres database since version 10. We’re still in the process of exploring the implications (and possible limitations) of using pgoutput, but so far things look promising and it may eventually be a valuable tool to have in the box.

Stay tuned for more details coming soon!

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/06/28/debezium-0-10-0-beta2-released/index.html b/blog/2019/06/28/debezium-0-10-0-beta2-released/index.html index 7d93e49c72..bb34d96897 100644 --- a/blog/2019/06/28/debezium-0-10-0-beta2-released/index.html +++ b/blog/2019/06/28/debezium-0-10-0-beta2-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Beta2 Released

It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option now works as expected during snapshotting (DBZ-1335), and the connector no longer stumbles over materialized views during snapshotting (DBZ-1345).

The SQL Server connector will use much less memory in many situations (DBZ-1065), and it’s now configurable whether it should emit tombstone events for deletions or not (DBZ-835). This was also added for the Oracle connector, bringing consistency for this option across all the connectors.
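
As a sketch, turning tombstones off for the SQL Server connector would then look like the following excerpt, assuming the same tombstones.on.delete option name already known from the MySQL connector (the default keeps tombstones enabled):

{
    "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
    "tombstones.on.delete": "false"
}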

Note that this release can be used with Apache Kafka 2.x, but not with 1.x. This was an unintentional change and compatibility with 1.x will be restored for the Beta3 release (the issue to track is DBZ-1361).

Please refer to the 0.10.0.Beta2 release notes to learn more about all resolved issues and the upgrading procedure.

Many thanks to everybody from the Debezium community who contributed to this release: Cheng Pan, Guillaume Rosauro, Mariusz Strzelecki and Stathis Souris.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.Beta2 Released

It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option now works as expected during snapshotting (DBZ-1335), and the connector no longer stumbles over materialized views during snapshotting (DBZ-1345).

The SQL Server connector will use much less memory in many situations (DBZ-1065), and it’s now configurable whether it should emit tombstone events for deletions or not (DBZ-835). This was also added for the Oracle connector, bringing consistency for this option across all the connectors.

Note that this release can be used with Apache Kafka 2.x, but not with 1.x. This was an unintentional change and compatibility with 1.x will be restored for the Beta3 release (the issue to track is DBZ-1361).

Please refer to the 0.10.0.Beta2 release notes to learn more about all resolved issues and the upgrading procedure.

Many thanks to everybody from the Debezium community who contributed to this release: Cheng Pan, Guillaume Rosauro, Mariusz Strzelecki and Stathis Souris.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/07/08/tutorial-sentry-debezium-container-images/index.html b/blog/2019/07/08/tutorial-sentry-debezium-container-images/index.html index 4a1078288e..dc4019a0d2 100644 --- a/blog/2019/07/08/tutorial-sentry-debezium-container-images/index.html +++ b/blog/2019/07/08/tutorial-sentry-debezium-container-images/index.html @@ -85,4 +85,4 @@ RUN docker-maven-download \ central io/sentry sentry "$SENTRY_VERSION" 4bf1d6538c9c0ebc22526e2094b9bbde && \ docker-maven-download \ - central io/sentry sentry-log4j "$SENTRY_VERSION" 74af872827bd7e1470fd966449637a77

Build and Run

Now we can simply build the image:

$ docker build -t debezium/connect-sentry:1 --build-arg=JKS_STOREPASS="123456789" .

When running the image, we now have to configure our Kafka Connect application to load the JKS file by setting KAFKA_OPTS: -Djavax.net.ssl.trustStore=/ssl/certificates.jks -Djavax.net.ssl.trustStorePassword=<YOUR TRUSTSTORE PASSWORD>.

Sentry can be configured in many ways; I like to do it via environment variables. The minimum we can set is the Sentry DSN (which is necessary to point to your project) and the name of the running environment (e.g. production, staging).

In this case we can configure the variables: SENTRY_DSN=<GET THE DSN IN SENTRY’S DASHBOARD>, SENTRY_ENVIRONMENT=dev.
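
Putting it all together, starting the resulting image could look roughly like the following sketch; the Kafka host, topic names and group id are placeholders following the usual environment variables of the debezium/connect image, and the truststore password matches the JKS_STOREPASS build argument from above:

$ docker run -it --rm --name connect-sentry -p 8083:8083 \
    -e GROUP_ID=1 \
    -e CONFIG_STORAGE_TOPIC=my_connect_configs \
    -e OFFSET_STORAGE_TOPIC=my_connect_offsets \
    -e BOOTSTRAP_SERVERS=kafka:9092 \
    -e KAFKA_OPTS="-Djavax.net.ssl.trustStore=/ssl/certificates.jks -Djavax.net.ssl.trustStorePassword=123456789" \
    -e SENTRY_DSN=<GET THE DSN IN SENTRY'S DASHBOARD> \
    -e SENTRY_ENVIRONMENT=dev \
    --link kafka:kafka \
    debezium/connect-sentry:1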

In case you’d like to learn more about using the Debezium container images, please check our tutorial.

And that’s it: a basic recipe for extending our Docker setup, using Sentry as an example; other modifications should be just as simple. For an example of how a RecordTooLarge exception from the Kafka producer looks in this setup, see the picture below:

Sentry Exception example

Conclusion

Thanks to the recent refactor of the Debezium container images, it got very easy to amend them with your custom extensions. Downloading external dependencies and adding them to the images became a trivial task and we’d love to hear your feedback about it!

If you are curious about the refactoring itself, you can find the details in pull request debezium/container-images#131.

Renato Mefi

Renato Mefi is a Staff Engineer at Usabilla (SurveyMonkey), where he innovates mostly around Kafka, Docker and DevOps in general, and has contributed to Kafka, Debezium and others. He lives in Amsterdam and cycles both for fun and for his commute.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + central io/sentry sentry-log4j "$SENTRY_VERSION" 74af872827bd7e1470fd966449637a77

Build and Run

Now we can simply build the image:

$ docker build -t debezium/connect-sentry:1 --build-arg=JKS_STOREPASS="123456789" .

When running the image, we now have to configure our Kafka Connect application to load the JKS file by setting KAFKA_OPTS: -Djavax.net.ssl.trustStore=/ssl/certificates.jks -Djavax.net.ssl.trustStorePassword=<YOUR TRUSTSTORE PASSWORD>.

Sentry can be configured in many ways; I like to do it via environment variables. The minimum we can set is the Sentry DSN (which is necessary to point to your project) and the name of the running environment (e.g. production, staging).

In this case we can configure the variables: SENTRY_DSN=<GET THE DSN IN SENTRY’S DASHBOARD>, SENTRY_ENVIRONMENT=dev.

In case you’d like to learn more about using the Debezium container images, please check our tutorial.

And that’s it: a basic recipe for extending our Docker setup, using Sentry as an example; other modifications should be just as simple. For an example of how a RecordTooLarge exception from the Kafka producer looks in this setup, see the picture below:

Sentry Exception example

Conclusion

Thanks to the recent refactor of the Debezium container images, it got very easy to amend them with your custom extensions. Downloading external dependencies and adding them to the images became a trivial task and we’d love to hear your feedback about it!

If you are curious about the refactoring itself, you can find the details in pull request debezium/container-images#131.

Renato Mefi

Renato Mefi is a Staff Engineer at Usabilla (SurveyMonkey), where he innovates mostly around Kafka, Docker and DevOps in general, and has contributed to Kafka, Debezium and others. He lives in Amsterdam and cycles both for fun and for his commute.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/07/12/streaming-cassandra-at-wepay-part-1/index.html b/blog/2019/07/12/streaming-cassandra-at-wepay-part-1/index.html index e783228482..f2d5f1eafa 100644 --- a/blog/2019/07/12/streaming-cassandra-at-wepay-part-1/index.html +++ b/blog/2019/07/12/streaming-cassandra-at-wepay-part-1/index.html @@ -1 +1 @@ - Streaming Cassandra at WePay - Part 1

This post originally appeared on the WePay Engineering blog.

Historically, MySQL had been the de-facto database of choice for microservices at WePay. As WePay scales, the sheer volume of data written into some of our microservice databases demanded that we make a scaling decision between sharded MySQL (i.e. Vitess) and a natively sharded NoSQL database. After a series of evaluations, we picked Cassandra, a NoSQL database, primarily because of its high availability, horizontal scalability, and ability to handle high write throughput.

Batch ETL Options

After introducing Cassandra to our infrastructure, our next challenge was to figure out a way to expose data in Cassandra to BigQuery, our data warehouse, for analytics and reporting. We quickly built an Airflow hook and operator to execute full loads. This obviously doesn’t scale, as it rewrites the entire database on each load. To scale the pipeline, we evaluated two incremental load approaches, but both have their shortcomings:

  1. Range query. This is a common ETL approach where data is extracted via a range query at regular intervals, such as hourly or daily. Anyone familiar with Cassandra data modelling would quickly realize how unrealistic this approach is. Cassandra tables need to be modeled to optimize query patterns used in production. Adding this query pattern for analytics in most cases means cloning the table with different clustering keys. RDBMS folks might suggest a secondary index to support this query pattern, but secondary indexes in Cassandra are local, so this approach would pose performance and scaling issues of its own.

  2. Process unmerged SSTables. SSTables are Cassandra’s immutable storage files. Cassandra offers a sstabledump CLI command that converts SSTable content into human-readable JSON. However, Cassandra is built on top of the concept of Log-Structured Merge (LSM) Tree, meaning SSTables merge periodically into new compacted files. Depending on the compaction strategy, detecting unmerged SSTable files out-of-band may be challenging (we later learned about the incremental backup feature in Cassandra which only backs up uncompacted SSTables; so this approach would have worked as well.)

Given these challenges, and having built and operated a streaming data pipeline for MySQL, we began to explore streaming options for Cassandra.

Streaming Options

Double-Writing

Image showing the writer sending two distinct writes

The idea is to publish to Kafka every time a write is performed on Cassandra. This double-writing could be performed via the built-in trigger or a custom wrapper around the client. There are performance problems with this approach. First, because we now need to write to two systems instead of one, write latency is increased. More importantly, when a write to one system fails due to a timeout, it is indeterminate whether the write succeeded. To guarantee data consistency on both systems, we would have to implement distributed transactions, but the multiple roundtrips needed for consensus would increase latency and reduce throughput further. This defeats the purpose of a high write-throughput database.

Kafka as Event Source

Image showing writes sent to Kafka and then downstream DB

The idea is to write to Kafka rather than directly writing to Cassandra; and then apply the writes to Cassandra by consuming events from Kafka. Event sourcing is a pretty popular approach these days. However, if you already have existing services directly writing to Cassandra, it would require a change in application code and a nontrivial migration. This approach also violates read-your-writes consistency: the requirement that if a process performs a write, then the same process performing a subsequent read must observe the write’s effects. Since writes are routed through Kafka, there will be a lag between when the write is issued and when it is applied; during this time, reads to Cassandra will result in stale data. This may cause unforeseeable production issues.

Parsing Commit Logs

Image showing commit logs sent to Kafka

Cassandra introduced a change data capture (CDC) feature in 3.0 to expose its commit logs. Commit logs are write-ahead logs in Cassandra designed to provide durability in case of machine crashes. They are typically discarded upon flush. With CDC enabled, they are instead transferred to a local CDC directory upon flush, which is then readable by other processes on the Cassandra node. This allows us to use the same CDC mechanism as in our MySQL streaming pipeline. It decouples production operations from analytics, and thus does not require additional work from application engineers.

Ultimately, after considering throughput, consistency, and separation of concerns, the final option – parsing commit logs – became the top contender.

Commit Log Deep Dive

Aside from exposing commit logs, Cassandra also provides CommitLogReader and CommitLogReadHandler classes to help with the deserialization of logs. It seems like the hard work has been done, and what’s left is applying transformations – converting deserialized representations into Avro records and publishing them to Kafka. However, as we dug further into the implementation of the CDC feature and of Cassandra itself, we realized that there are many new challenges.

Delayed Processing

Commit logs only arrive in the CDC directory when a log segment is full, at which point it is flushed/discarded. This implies there is a delay between when an event is logged and when it is captured. If few or no writes are executed, then the delay in event capturing could be arbitrarily long.

Space Management

In MySQL you can set binlog retention such that the logs will be automatically deleted after the configured retention period. However, in Cassandra there is no such option. Once the commit logs are transferred to the CDC directory, a consumer must be in place to clean them up after processing. If the disk space consumed by the CDC directory exceeds a given threshold, further writes to the database will be rejected.
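
For context, the knobs governing this behaviour live in cassandra.yaml; the sketch below uses the option names from Cassandra 3.x with purely illustrative values (defaults differ between versions):

cdc_enabled: true
cdc_raw_directory: /var/lib/cassandra/cdc_raw
cdc_total_space_in_mb: 4096
cdc_free_space_check_interval_ms: 250
commitlog_segment_size_in_mb: 32

Once unprocessed segments in cdc_raw_directory add up to cdc_total_space_in_mb, Cassandra starts rejecting writes to CDC-enabled tables, which is why a consumer that deletes processed files is essential.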

Duplicated Events

Commit logs on an individual Cassandra node do not reflect all writes to the cluster; they only reflect writes to the node. This makes it necessary to process commit logs on all nodes. But with a replication factor of N, N copies of each event are sent downstream.

Out-of-Order Events

Writes to an individual Cassandra node are logged serially as they arrive. However, these events may arrive out-of-order from when they are issued. Downstream consumers of these events must understand the event time and implement last write wins logic similar to Cassandra’s read path to get the correct result.

Out-of-Band Schema Change

Schema changes of tables are communicated via a gossip protocol and are not recorded in commit logs. Therefore changes in schema could only be detected on a best-effort basis.

Incomplete Row Data

Cassandra does not perform a read before write; as a result, change events do not capture the state of every column, only the state of modified columns. This makes the change event less useful than if the full row were available.

Once we acquired a deep understanding of Cassandra commit logs, we re-assessed our requirements against the given constraints in order to design a minimum viable infrastructure.

Minimum Viable Infrastructure

Borrowing from the minimum viable product philosophy, we want to design a data pipeline with a minimum set of features and requirements to satisfy our immediate customers. For Cassandra CDC, this means:

  • Production database’s health and performance should not be negatively impacted by introducing CDC; slowed operations and system downtimes are much costlier than a delay in the analytics pipeline

  • Querying Cassandra tables in our data warehouse should match the results of querying the production database (barring delays); having duplicate and/or incomplete rows amplifies post-processing workload for every end user

With these criteria in front of us, we began to brainstorm for solutions, and ultimately came up with three approaches:

Stateless Stream Processing

This solution is inspired by Datastax’s advanced replication blog post. The idea is to deploy an agent on each Cassandra node to process local commit logs. Each agent is considered “primary” for a subset of writes based on partition keys, such that every event has exactly one primary agent. Then during CDC, in order to avoid duplicate events, each agent only sends an event to Kafka if it is the primary agent for the event. To handle eventual consistency, each agent would sort events into per-table time-sliced windows as they arrive (but doesn’t publish them right away); when a window expires, events in that window are hashed, and the hash is compared against other nodes. If they don’t match, data is fetched from the inconsistent node so the correct value could be resolved by last write wins. Finally, the corrected events in that window will be sent to Kafka. Any out-of-order event beyond the time-sliced windows would have to be logged into an out-of-sequence file and handled separately. Since deduplication and ordering are done in-memory, concerns about agent failover causing data loss, OOM issues impacting the production database, and the overall complexity of this implementation stopped us from exploring it further.

Stateful Stream Processing

This solution is the most feature rich. The idea is that the agent on each Cassandra node will process commit logs and publish events to Kafka without deduplication and ordering. Then a stream processing engine will consume these raw events and do the heavy lifting (such as filtering out duplicate events with a cache, managing event orders with event-time windowing, and capturing state of unmodified columns by performing read before write on a state store), and then publish these derived events to a separate Kafka topic. Finally, KCBQ will be used to consume events from this topic and upload them to BigQuery. This approach is appealing because it solves the problem generically – anyone can subscribe to the latter Kafka topic without needing to handle deduplication and ordering on their own. However, this approach introduces a nontrivial amount of operational overhead; we would have to maintain a stream processing engine, a database, and a cache.

Processing-On-Read

Similar to the previous approach, the idea is to process commit logs on each Cassandra node and send events to Kafka without deduplication and ordering. Unlike the previous approach, the stream processing portion is completely eliminated. Instead the raw events will be directly uploaded to BigQuery via KCBQ. Views are created on top of the raw tables to handle deduplication, ordering, and merging of columns to form complete rows. Because BigQuery views are virtual tables, the processing is done lazily each time the view is queried. To prevent the view query from getting too expensive, the views would be materialized periodically. This approach removes both operational complexity and code complexity by leveraging BigQuery’s massively parallel query engine. However, the drawback is that non-KCBQ downstream consumers must do all the work on their own.

Given that our main purpose of streaming Cassandra is data warehousing, we ultimately decided to implement processing-on-read. It provides the essential features for our existing use case, and offers the flexibility to expand into the other two more generic solutions mentioned above in the future.

Open Source

During this process of building a real-time data pipeline for Cassandra, we have received a substantial amount of interest in this project. As a result, we have decided to open-source the Cassandra CDC agent under the Debezium umbrella as an incubating connector. If you would like to learn more or contribute, check out the work-in-progress pull request for source code and documentation.

In the second half of this blog post series, we will elaborate on the CDC implementation itself in more details. Stay tuned!

Joy Gao

Joy Gao is a software engineer at WePay, where she focuses on change data capture, data warehousing, and distributed systems.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Streaming Cassandra at WePay - Part 1

This post originally appeared on the WePay Engineering blog.

Historically, MySQL had been the de-facto database of choice for microservices at WePay. As WePay scales, the sheer volume of data written into some of our microservice databases demanded that we make a scaling decision between sharded MySQL (i.e. Vitess) and a natively sharded NoSQL database. After a series of evaluations, we picked Cassandra, a NoSQL database, primarily because of its high availability, horizontal scalability, and ability to handle high write throughput.

Batch ETL Options

After introducing Cassandra to our infrastructure, our next challenge was to figure out a way to expose data in Cassandra to BigQuery, our data warehouse, for analytics and reporting. We quickly built an Airflow hook and operator to execute full loads. This obviously doesn’t scale, as it rewrites the entire database on each load. To scale the pipeline, we evaluated two incremental load approaches, but both have their shortcomings:

  1. Range query. This is a common ETL approach where data is extracted via a range query at regular intervals, such as hourly or daily. Anyone familiar with Cassandra data modelling would quickly realize how unrealistic this approach is. Cassandra tables need to be modeled to optimize query patterns used in production. Adding this query pattern for analytics in most cases means cloning the table with different clustering keys. RDBMS folks might suggest a secondary index to support this query pattern, but secondary indexes in Cassandra are local, so this approach would pose performance and scaling issues of its own.

  2. Process unmerged SSTables. SSTables are Cassandra’s immutable storage files. Cassandra offers a sstabledump CLI command that converts SSTable content into human-readable JSON. However, Cassandra is built on top of the concept of Log-Structured Merge (LSM) Tree, meaning SSTables merge periodically into new compacted files. Depending on the compaction strategy, detecting unmerged SSTable files out-of-band may be challenging (we later learned about the incremental backup feature in Cassandra which only backs up uncompacted SSTables; so this approach would have worked as well.)

Given these challenges, and having built and operated a streaming data pipeline for MySQL, we began to explore streaming options for Cassandra.

Streaming Options

Double-Writing

Image showing the writer sending two distinct writes

The idea is to publish to Kafka every time a write is performed on Cassandra. This double-writing could be performed via the built-in trigger or a custom wrapper around the client. There are performance problems with this approach. First, because we now need to write to two systems instead of one, write latency is increased. More importantly, when a write to one system fails due to a timeout, it is indeterminate whether the write succeeded. To guarantee data consistency on both systems, we would have to implement distributed transactions, but the multiple roundtrips needed for consensus would increase latency and reduce throughput further. This defeats the purpose of a high write-throughput database.

Kafka as Event Source

Image showing writes sent to Kafka and then downstream DB

The idea is to write to Kafka rather than directly writing to Cassandra; and then apply the writes to Cassandra by consuming events from Kafka. Event sourcing is a pretty popular approach these days. However, if you already have existing services directly writing to Cassandra, it would require a change in application code and a nontrivial migration. This approach also violates read-your-writes consistency: the requirement that if a process performs a write, then the same process performing a subsequent read must observe the write’s effects. Since writes are routed through Kafka, there will be a lag between when the write is issued and when it is applied; during this time, reads to Cassandra will result in stale data. This may cause unforeseeable production issues.

Parsing Commit Logs

Image showing commit logs sent to Kafka

Cassandra introduced a change data capture (CDC) feature in 3.0 to expose its commit logs. Commit logs are write-ahead logs in Cassandra designed to provide durability in case of machine crashes. They are typically discarded upon flush. With CDC enabled, they are instead transferred to a local CDC directory upon flush, which is then readable by other processes on the Cassandra node. This allows us to use the same CDC mechanism as in our MySQL streaming pipeline. It decouples production operations from analytics, and thus does not require additional work from application engineers.

Ultimately, after considering throughput, consistency, and separation of concerns, the final option – parsing commit logs – became the top contender.

Commit Log Deep Dive

Aside from exposing commit logs, Cassandra also provides CommitLogReader and CommitLogReadHandler classes to help with the deserialization of logs. It seems like the hard work has been done, and what’s left is applying transformations – converting deserialized representations into Avro records and publishing them to Kafka. However, as we dug further into the implementation of the CDC feature and of Cassandra itself, we realized that there are many new challenges.

Delayed Processing

Commit logs only arrive in the CDC directory when a log segment is full, at which point it is flushed/discarded. This implies there is a delay between when an event is logged and when it is captured. If few or no writes are executed, then the delay in event capturing could be arbitrarily long.

Space Management

In MySQL you can set binlog retention such that the logs will be automatically deleted after the configured retention period. However, in Cassandra there is no such option. Once the commit logs are transferred to the CDC directory, a consumer must be in place to clean them up after processing. If the disk space consumed by the CDC directory exceeds a given threshold, further writes to the database will be rejected.

Duplicated Events

Commit logs on an individual Cassandra node do not reflect all writes to the cluster; they only reflect writes to the node. This makes it necessary to process commit logs on all nodes. But with a replication factor of N, N copies of each event are sent downstream.

Out-of-Order Events

Writes to an individual Cassandra node are logged serially as they arrive. However, these events may arrive out of order relative to when they were issued. Downstream consumers of these events must understand the event time and implement last-write-wins logic, similar to Cassandra’s read path, to get the correct result.

Out-of-Band Schema Change

Schema changes of tables are communicated via a gossip protocol and are not recorded in the commit logs. Therefore, changes in schema can only be detected on a best-effort basis.

Incomplete Row Data

Cassandra does not perform a read before write; as a result, change events do not capture the state of every column, only the state of the modified columns. This makes a change event less useful than if the full row were available.

Once we acquired a deep understanding of Cassandra commit logs, we re-assessed our requirements against the given constraints in order to design a minimum viable infrastructure.

Minimum Viable Infrastructure

Borrowing from the minimum viable product philosophy, we want to design a data pipeline with a minimum set of features and requirements to satisfy our immediate customers. For Cassandra CDC, this means:

  • Production database’s health and performance should not be negatively impacted by introducing CDC; slowed operations and system downtimes are much costlier than a delay in the analytics pipeline

  • Querying Cassandra tables in our data warehouse should match the results of querying the production database (barring delays); having duplicate and/or incomplete rows amplifies post-processing workload for every end user

With these criteria in front of us, we began to brainstorm for solutions, and ultimately came up with three approaches:

Stateless Stream Processing

This solution is inspired by Datastax’s advanced replication blog post. The idea is to deploy an agent on each Cassandra node to process local commit logs. Each agent is considered “primary” for a subset of writes based on partition keys, such that every event has exactly one primary agent. Then, during CDC, in order to avoid duplicate events, each agent only sends an event to Kafka if it is the primary agent for that event. To handle eventual consistency, each agent sorts events into per-table, time-sliced windows as they arrive (without publishing them right away); when a window expires, the events in that window are hashed, and the hash is compared against the other nodes. If they don’t match, data is fetched from the inconsistent node so the correct value can be resolved by last write wins. Finally, the corrected events in that window are sent to Kafka. Any out-of-order event beyond the time-sliced windows would have to be logged into an out-of-sequence file and handled separately. Since deduplication and ordering are done in memory, concerns about agent failover causing data loss, OOM issues impacting the production database, and the overall complexity of this implementation stopped us from exploring it further.

Stateful Stream Processing

This solution is the most feature rich. The idea is that the agent on each Cassandra node will process commit logs and publish events to Kafka without deduplication and ordering. Then a stream processing engine will consume these raw events and do the heavy lifting (such as filtering out duplicate events with a cache, managing event orders with event-time windowing, and capturing state of unmodified columns by performing read before write on a state store), and then publish these derived events to a separate Kafka topic. Finally, KCBQ will be used to consume events from this topic and upload them to BigQuery. This approach is appealing because it solves the problem generically – anyone can subscribe to the latter Kafka topic without needing to handle deduplication and ordering on their own. However, this approach introduces a nontrivial amount of operational overhead; we would have to maintain a stream processing engine, a database, and a cache.

Processing-On-Read

Similar to the previous approach, the idea is to process commit logs on each Cassandra node and send events to Kafka without deduplication and ordering. Unlike the previous approach, the stream processing portion is completely eliminated. Instead the raw events will be directly uploaded to BigQuery via KCBQ. Views are created on top of the raw tables to handle deduplication, ordering, and merging of columns to form complete rows. Because BigQuery views are virtual tables, the processing is done lazily each time the view is queried. To prevent the view query from getting too expensive, the views would be materialized periodically. This approach removes both operational complexity and code complexity by leveraging BigQuery’s massively parallel query engine. However, the drawback is that non-KCBQ downstream consumers must do all the work on their own.

Given that our main purpose of streaming Cassandra is data warehousing, we ultimately decided to implement processing-on-read. It provides the essential features for our existing use case, and offers the flexibility to expand into the other two more generic solutions mentioned above in the future.

Open Source

During the process of building a real-time data pipeline for Cassandra, we have received a substantial amount of interest in this project. As a result, we have decided to open-source the Cassandra CDC agent under the Debezium umbrella as an incubating connector. If you would like to learn more or contribute, check out the work-in-progress pull request for the source code and documentation.

In the second half of this blog post series, we will elaborate on the CDC implementation itself in more detail. Stay tuned!

Joy Gao

Joy Gao is a software engineer at WePay, where she focuses on change data capture, data warehousing, and distributed systems.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/07/15/streaming-cassandra-at-wepay-part-2/index.html b/blog/2019/07/15/streaming-cassandra-at-wepay-part-2/index.html index 6d96f51878..225f62ff55 100644 --- a/blog/2019/07/15/streaming-cassandra-at-wepay-part-2/index.html +++ b/blog/2019/07/15/streaming-cassandra-at-wepay-part-2/index.html @@ -1 +1 @@ - Streaming Cassandra at WePay - Part 2

This post originally appeared on the WePay Engineering blog.

In the first half of this blog post series, we explained our decision-making process of designing a streaming data pipeline for Cassandra at WePay. In this post, we will break down the pipeline into three sections and discuss each of them in more detail:

  1. Cassandra to Kafka with CDC agent

  2. Kafka to BigQuery with KCBQ

  3. Transformation with BigQuery view

Cassandra to Kafka with CDC Agent

The Cassandra CDC agent is a JVM process that is intended to be deployed on each node in a Cassandra cluster. The agent consists of several interdependent processors, running concurrently and working together to publish change events to Kafka.

Snapshot Processor

This processor is responsible for bootstrapping new tables. It looks up the CDC configuration to determine the snapshot mode, and performs a snapshot of CDC-enabled tables if needed. To snapshot a table, the agent performs a full table scan, converts each row in the result set into an individual create event, and then sequentially enqueues these events onto an in-memory BlockingQueue.
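The handoff into the queue can be pictured with a small, self-contained sketch (the table name, event shape, and scanned rows below are made up for illustration; the real agent attaches schema and timestamp metadata to each event):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class SnapshotHandoff {
        public static void main(String[] args) throws InterruptedException {
            // Bounded queue shared by the snapshot/commit log processors (producers)
            // and the queue processor (consumer); the bound caps memory usage.
            BlockingQueue<Map<String, Object>> queue = new LinkedBlockingQueue<>(10_000);

            // Stand-in for the full table scan; the real agent pages through the table.
            List<Map<String, Object>> scannedRows = List.of(
                    Map.of("id", 1, "amount", 42),
                    Map.of("id", 2, "amount", 7));

            // Each existing row becomes an individual "create" event, enqueued in order.
            for (Map<String, Object> row : scannedRows) {
                queue.put(Map.of("op", "c", "table", "orders", "after", row)); // blocks when the queue is full
            }

            // Queue processor side (normally a separate thread): drain and hand off to the Kafka producer.
            while (!queue.isEmpty()) {
                System.out.println("would serialize and publish: " + queue.take());
            }
        }
    }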

Commit Log Processor

This processor is responsible for watching the CDC directory for new commit logs, parsing the commit log files via Cassandra’s CommitLogReader, transforming deserialized mutations into standardized change events, and finally enqueuing them to the same queue as the snapshot processor.
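A minimal sketch of the directory-watching part, using the JDK’s WatchService (the CDC path is illustrative, and the comment marks where the CommitLogReader invocation described in the first post would go):

    import java.nio.file.FileSystems;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardWatchEventKinds;
    import java.nio.file.WatchEvent;
    import java.nio.file.WatchKey;
    import java.nio.file.WatchService;

    public class CdcDirectoryWatcher {
        public static void main(String[] args) throws Exception {
            Path cdcDir = Paths.get("/var/lib/cassandra/cdc_raw"); // illustrative path
            WatchService watcher = FileSystems.getDefault().newWatchService();
            cdcDir.register(watcher, StandardWatchEventKinds.ENTRY_CREATE);

            while (true) {
                WatchKey key = watcher.take(); // blocks until a new commit log segment shows up
                for (WatchEvent<?> event : key.pollEvents()) {
                    Path segment = cdcDir.resolve((Path) event.context());
                    System.out.println("new commit log segment: " + segment);
                    // here the agent hands the file to CommitLogReader and enqueues the resulting events
                }
                key.reset();
            }
        }
    }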

At this point, some readers may have concerns about running the Snapshot Processor and the Commit Log Processor concurrently rather than serially. The reason this works is that Cassandra uses a client-side timestamp to determine event order, and resolves conflicts with last write wins. This client-side timestamp is deliberately stored in each change event. This is why snapshotting doesn’t have to precede commit log processing – the ordering is determined later on, when the data is queried in the data warehouse.

Queue Processor

This processor is responsible for dequeuing change events, transforming them into Avro records, and sending them to Kafka via a Kafka producer. It also tracks the position of the most recently sent event, so that on restart it is able to pick up from where it left off.
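Conceptually, the publish-and-remember step boils down to something like the following sketch (topic name, payload, and the offset file are placeholders, not the agent’s actual code):

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class PublishAndTrack {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                // Commit log segment + offset of the event that was just dequeued.
                String position = "CommitLog-6-123.log:4096";
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("cassandra.my_keyspace.orders", "pk-1", "{...serialized change event...}");

                producer.send(record, (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace();
                        return;
                    }
                    try {
                        // Persist the position only after Kafka has acknowledged the event,
                        // so a restart resumes from the last event known to be delivered.
                        Files.writeString(Paths.get("cdc-agent-offset.txt"), position);
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                });
            }
        }
    }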

Implementing an in-memory queue in the CDC agent seems like overkill at first. Given there is only a single thread doing the enqueue and another thread doing the dequeue, the performance boost is negligible. The motivation here is to decouple the work of parsing commit logs, which should be done serially in the right order, from the work of serializing and publishing Kafka events, which can be parallelized by multiple threads for different tables. Although such parallelization is not implemented at the moment, we want the flexibility of adding this feature in the near future.

Some may also wonder why Kafka Connect is not used here, as it seems like a natural fit for streaming. It would be a great option if we wanted distributed parallel processing with fault tolerance. However, it is more complicated to deploy, monitor, and debug than a plain Kafka producer. For the purpose of building a minimum viable infrastructure, we chose the Kafka producer at the time.

Schema Processor

In order to support automatic schema evolution, this processor periodically polls the database for the latest table schema, and updates the in-memory schema cache if a change is detected. Snapshot Processor and Commit Log Processor both look up table schema from this cache and attach it as part of the change event prior to enqueue. Then upon dequeue, the Queue Processor transforms the attached table schema into an Avro schema for record serialization.

Commit Log Post Processor

This processor is responsible for cleaning up commit logs after they have been processed. The default Commit Log Post Processor implementation simply performs deletion. A custom Commit Log Post Processor can be configured for use cases such as archiving commit log files to S3 or GCS.

Kafka to BigQuery with KCBQ

Once the events arrive in Kafka, we use KCBQ to send the event data to BigQuery without performing special transformations, just like in our MySQL streaming data pipeline. We explained this connector in more detail in a previous blog post.

Transformation with BigQuery View

Once the events are in BigQuery, this is where the heavy lifting is done. We create virtual views on top of the raw tables to merge the data in a way that mirrors the source table in Cassandra. Note that each row in the raw tables contains limited data – only the columns that were modified carry values. This means that selecting the latest row for each primary key will not provide us with data that is consistent with the source. Instead, the query must identify the latest cell in each column for each primary key. This can be achieved with self-joins on the primary key, one for each column in the table. Although joins are slow in MySQL, BigQuery’s parallel execution engine and columnar storage make this possible. A view on top of a 1 TB Cassandra table in BigQuery takes about 100 seconds to query.
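To give a feel for the shape of such a view, here is a hand-written two-column example (dataset, table, and column names are invented; the real views are generated per table, and BigQuery’s QUALIFY clause is used here purely for brevity):

    -- Raw table: (pk, event_ts, amount, status); columns that were not modified arrive as NULL.
    CREATE OR REPLACE VIEW my_dataset.orders_view AS
    WITH latest_amount AS (
      SELECT pk, amount
      FROM my_dataset.orders_raw
      WHERE amount IS NOT NULL
      QUALIFY ROW_NUMBER() OVER (PARTITION BY pk ORDER BY event_ts DESC) = 1
    ),
    latest_status AS (
      SELECT pk, status
      FROM my_dataset.orders_raw
      WHERE status IS NOT NULL
      QUALIFY ROW_NUMBER() OVER (PARTITION BY pk ORDER BY event_ts DESC) = 1
    )
    SELECT k.pk, a.amount, s.status
    FROM (SELECT DISTINCT pk FROM my_dataset.orders_raw) k
    LEFT JOIN latest_amount a ON a.pk = k.pk
    LEFT JOIN latest_status s ON s.pk = k.pk;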

Compaction

The fact that the BigQuery view is virtual means that each query of the view essentially triggers a full compaction of the raw data. As a consequence, the cost goes up with the number of queries, not to mention that the duplicated events amplify the amount of data that needs to be processed by a factor of N, where N is the replication factor. To save cost and improve performance, periodic compaction by materializing the view is necessary.
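The materialization itself can be as simple as a scheduled query along these lines (placeholder names again); consumers then read the compacted table instead of the view:

    -- Run periodically, e.g. hourly, via BigQuery scheduled queries.
    CREATE OR REPLACE TABLE my_dataset.orders_compacted AS
    SELECT * FROM my_dataset.orders_view;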

Future Development Work

Support for Cassandra 4.0

In Cassandra 4.0, the improved CDC feature allows the connector to parse events in near real time as they are written, rather than in micro-batches on each commit log flush. This reduces latency substantially.

Performance Optimization

As mentioned earlier, there is a single thread responsible for dequeuing, serializing, and publishing Kafka records. If the agent cannot keep up as write throughput increases, a backlog of unprocessed commit logs builds up, which could potentially impact the health of our production database. The next step is to leverage parallel processing of events to optimize performance.

Streamline with Debezium and Kafka Connect

We initially built the Cassandra CDC agent as a standalone project. Now that it is open-sourced as a Debezium connector, we can replace some of our custom classes with existing ones in Debezium. Another improvement is to support common features that all Debezium connectors have, such as support for multiple serialization formats. Finally, the CDC agent is not fault tolerant; robust alerting and monitoring are required as part of deployment. One area to explore in the future is building the CDC agent on top of Kafka Connect as a source connector; this would further streamline the Cassandra connector with the other Debezium connectors, and provide scalability and fault tolerance for free.

Closing Remarks

Cassandra, being a peer-to-peer distributed database, poses some really interesting challenges for CDC that do not exist in relational databases like MySQL and Postgres, or even in a single-master NoSQL database like MongoDB. It is worth evaluating these limitations before rolling out your own real-time data pipeline for Cassandra.

Besides understanding Cassandra internals, we learned a few lessons on engineering productivity along the way:

Minimum Viable Product Philosophy

By stripping away all features except for the essentials, we were able to build, test, and deploy a working solution in a reasonable time with limited resources. Had we aimed to design a pipeline that encompasses all features upfront, it would have taken a lot longer and required much more resources.

Community Involvement

Cassandra is an open-source project. Rather than tackling the problem solo, we engaged with the Cassandra community from the very start (sharing experiences with committers and users via meetups, discussing proposals on the mailing list, presenting proofs of concept at conferences, etc.); all of which provided us with valuable feedback throughout the design and implementation stages.

Joy Gao

Joy Gao is a software engineer at WePay, where she focuses on change data capture, data warehousing, and distributed systems.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Streaming Cassandra at WePay - Part 2

This post originally appeared on the WePay Engineering blog.

In the first half of this blog post series, we explained our decision-making process of designing a streaming data pipeline for Cassandra at WePay. In this post, we will break down the pipeline into three sections and discuss each of them in more detail:

  1. Cassandra to Kafka with CDC agent

  2. Kafka to BigQuery with KCBQ

  3. Transformation with BigQuery view

Cassandra to Kafka with CDC Agent

The Cassandra CDC agent is a JVM process that is intended to be deployed on each node in a Cassandra cluster. The agent consists of several interdependent processors, running concurrently and working together to publish change events to Kafka.

Snapshot Processor

This processor is responsible for bootstrapping new tables. It looks up the CDC configuration to determine the snapshot mode, and performs a snapshot of CDC-enabled tables if needed. To snapshot a table, the agent performs a full table scan, converts each row in the result set into an individual create event, and then sequentially enqueues these events onto an in-memory BlockingQueue.

Commit Log Processor

This processor is responsible for watching the CDC directory for new commit logs, parsing the commit log files via Cassandra’s CommitLogReader, transforming deserialized mutations into standardized change events, and finally enqueuing them to the same queue as the snapshot processor.

At this point, some readers may have concerns about running the Snapshot Processor and the Commit Log Processor concurrently rather than serially. The reason this works is that Cassandra uses a client-side timestamp to determine event order, and resolves conflicts with last write wins. This client-side timestamp is deliberately stored in each change event. This is why snapshotting doesn’t have to precede commit log processing – the ordering is determined later on, when the data is queried in the data warehouse.

Queue Processor

This processor is responsible for dequeuing change events, transforming them into Avro records, and sending them to Kafka via a Kafka producer. It also tracks the position of the most recently sent event, so that on restart it is able to pick up from where it left off.

Implementing an in-memory queue in the CDC agent seems like overkill at first. Given there is only a single thread doing the enqueue and another thread doing the dequeue, the performance boost is negligible. The motivation here is to decouple the work of parsing commit logs, which should be done serially in the right order, from the work of serializing and publishing Kafka events, which can be parallelized by multiple threads for different tables. Although such parallelization is not implemented at the moment, we want the flexibility of adding this feature in the near future.

Some may also wonder why Kafka Connect is not used here, as it seems like a natural fit for streaming. It would be a great option if we wanted distributed parallel processing with fault tolerance. However, it is more complicated to deploy, monitor, and debug than a plain Kafka producer. For the purpose of building a minimum viable infrastructure, we chose the Kafka producer at the time.

Schema Processor

In order to support automatic schema evolution, this processor periodically polls the database for the latest table schema, and updates the in-memory schema cache if a change is detected. Snapshot Processor and Commit Log Processor both look up table schema from this cache and attach it as part of the change event prior to enqueue. Then upon dequeue, the Queue Processor transforms the attached table schema into an Avro schema for record serialization.

Commit Log Post Processor

This processor is responsible for cleaning up commit logs after they have been processed. The default Commit Log Post Processor implementation simply performs deletion. A custom Commit Log Post Processor can be configured for use cases such as archiving commit log files to S3 or GCS.

Kafka to BigQuery with KCBQ

Once the events arrive in Kafka, we use KCBQ to send the event data to BigQuery without performing special transformations, just like in our MySQL streaming data pipeline. We explained this connector in more detail in a previous blog post.

Transformation with BigQuery View

Once the events are in BigQuery, this is where the heavy lifting is done. We create virtual views on top of the raw tables to merge the data in a way that mirrors the source table in Cassandra. Note that each row in the raw tables contains limited data – only the columns that were modified carry values. This means that selecting the latest row for each primary key will not provide us with data that is consistent with the source. Instead, the query must identify the latest cell in each column for each primary key. This can be achieved with self-joins on the primary key, one for each column in the table. Although joins are slow in MySQL, BigQuery’s parallel execution engine and columnar storage make this possible. A view on top of a 1 TB Cassandra table in BigQuery takes about 100 seconds to query.

Compaction

The fact that the BigQuery view is virtual means that each query of the view essentially triggers a full compaction of the raw data. As a consequence, the cost goes up with the number of queries, not to mention that the duplicated events amplify the amount of data that needs to be processed by a factor of N, where N is the replication factor. To save cost and improve performance, periodic compaction by materializing the view is necessary.

Future Development Work

Support for Cassandra 4.0

In Cassandra 4.0, the improved CDC feature allows the connector to parse events in near real time as they are written, rather than in micro-batches on each commit log flush. This reduces latency substantially.

Performance Optimization

As mentioned earlier, there is a single thread responsible for dequeuing, serializing, and publishing Kafka records. If the agent cannot keep up as write throughput increases, a backlog of unprocessed commit logs builds up, which could potentially impact the health of our production database. The next step is to leverage parallel processing of events to optimize performance.

Streamline with Debezium and Kafka Connect

We initially built the Cassandra CDC agent as a standalone project. Now that it is open-sourced as a Debezium connector, we can replace some of our custom classes with existing ones in Debezium. Another improvement is to support common features that all Debezium connectors have, such as support for multiple serialization formats. Finally, the CDC agent is not fault tolerant; robust alerting and monitoring are required as part of deployment. One area to explore in the future is building the CDC agent on top of Kafka Connect as a source connector; this would further streamline the Cassandra connector with the other Debezium connectors, and provide scalability and fault tolerance for free.

Closing Remarks

Cassandra, being a peer-to-peer distributed database, poses some really interesting challenges for CDC that do not exist in relational databases like MySQL and Postgres, or even in a single-master NoSQL database like MongoDB. It is worth evaluating these limitations before rolling out your own real-time data pipeline for Cassandra.

Besides understanding Cassandra internals, we learned a few lessons on engineering productivity along the way:

Minimum Viable Product Philosophy

By stripping away all features except for the essentials, we were able to build, test, and deploy a working solution in a reasonable time with limited resources. Had we aimed to design a pipeline that encompasses all features upfront, it would have taken a lot longer and required much more resources.

Community Involvement

Cassandra is an open-source project. Rather than tackling the problem solo, we engaged with the Cassandra community from the very start (sharing experiences with committers and users via meetups, discussing proposals on the mailing list, presenting proofs of concept at conferences, etc.); all of which provided us with valuable feedback throughout the design and implementation stages.

Joy Gao

Joy Gao is a software engineer at WePay, where she focuses on change data capture, data warehousing, and distributed systems.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/07/25/debezium-0-10-0-beta3-released/index.html b/blog/2019/07/25/debezium-0-10-0-beta3-released/index.html index fa335ab210..8a6fcd6b28 100644 --- a/blog/2019/07/25/debezium-0-10-0-beta3-released/index.html +++ b/blog/2019/07/25/debezium-0-10-0-beta3-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Beta3 Released

The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 and higher as a service offered by the different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in required to enable streaming. This is no longer necessary. Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
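For instance, switching a connector registration over to the new decoding plug-in boils down to setting the plugin.name property, roughly like this (host, credentials, and names are placeholders):

    {
      "name": "inventory-connector",
      "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "plugin.name": "pgoutput",
        "database.hostname": "postgres",
        "database.port": "5432",
        "database.user": "postgres",
        "database.password": "postgres",
        "database.dbname": "inventory",
        "database.server.name": "dbserver1"
      }
    }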

There is a set of further minor improvements. The tombstones for deletes are configurable for all connectors now (DBZ-1365). Also, tables without primary keys are now supported by all connectors (DBZ-916). This further reduces the gap between the capabilities of the old and the new connectors.

There are improvements to the heartbeat system. Heartbeat messages now contain in their body the timestamp (DBZ-1363) of when they were created. The new messages are properly skipped by the outbox router (DBZ-1388). The MySQL connector additionally uses heartbeats for the BinlogReader (DBZ-1338). The MongoDB connector now utilizes heartbeats too (DBZ-1198).

As we know that metrics are very important for keeping Debezium happy in production, we have extended the set of supported metrics. A new metric counting events in error (DBZ-1222) has been added, so it is easy to monitor any anomalies in processing. Database history recovery can take a long time during startup, so it is now possible to monitor its progress (DBZ-1356).

The other changes include updating the Docker images to use Kafka 2.3.0 (DBZ-1358). PostgreSQL supports lockless snapshotting (DBZ-1238) and the outbox router now processes delete messages (DBZ-1320).

We continue with stabilization of the 0.10 release line, with lots of bug fixes to the different connectors.

Multiple defects in the MySQL parser have been fixed (DBZ-1398, DBZ-1397, DBZ-1376) and SAVEPOINT statements are no longer recorded in the database history (DBZ-794).

Under certain circumstances, it was possible that the PostgreSQL connector lost the first event when switching from the snapshot to streaming (DBZ-1400).

Please refer to the 0.10.0.Beta3 release notes to learn more about all resolved issues and the upgrading procedure.

Many thanks to everybody from the Debezium community who contributed to this release: Addison Higham, Bin Li, Brandon Brown and Renato Mefi.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.Beta3 Released

The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 and higher as a service offered by the different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in required to enable streaming. This is no longer necessary. Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.

There is a set of further minor improvements. The tombstones for deletes are configurable for all connectors now (DBZ-1365). Also, tables without primary keys are now supported by all connectors (DBZ-916). This further reduces the gap between the capabilities of the old and the new connectors.

There are improvements to the heartbeat system. Heartbeat messages now contain in their body the timestamp (DBZ-1363) of when they were created. The new messages are properly skipped by the outbox router (DBZ-1388). The MySQL connector additionally uses heartbeats for the BinlogReader (DBZ-1338). The MongoDB connector now utilizes heartbeats too (DBZ-1198).

As we know that metrics are very important for keeping Debezium happy in production, we have extended the set of supported metrics. A new metric counting events in error (DBZ-1222) has been added, so it is easy to monitor any anomalies in processing. Database history recovery can take a long time during startup, so it is now possible to monitor its progress (DBZ-1356).

The other changes include updating the Docker images to use Kafka 2.3.0 (DBZ-1358). PostgreSQL supports lockless snapshotting (DBZ-1238) and the outbox router now processes delete messages (DBZ-1320).

We continue with stabilization of the 0.10 release line, with lots of bug fixes to the different connectors.

Multiple defects in the MySQL parser have been fixed (DBZ-1398, DBZ-1397, DBZ-1376) and SAVEPOINT statements are no longer recorded in the database history (DBZ-794).

Under certain circumstances, it was possible that the PostgreSQL connector lost the first event when switching from the snapshot to streaming (DBZ-1400).

Please refer to the 0.10.0.Beta3 release notes to learn more about all resolved issues and the upgrading procedure.

Many thanks to everybody from the Debezium community who contributed to this release: Addison Higham, Bin Li, Brandon Brown and Renato Mefi.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/08/20/debezium-0-10-0-beta4-released/index.html b/blog/2019/08/20/debezium-0-10-0-beta4-released/index.html index c092c2929a..b5b62f02ce 100644 --- a/blog/2019/08/20/debezium-0-10-0-beta4-released/index.html +++ b/blog/2019/08/20/debezium-0-10-0-beta4-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.Beta4 Released

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts of rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

Incubating Cassandra Connector

If you have been following this blog lately, you’ll have read about the latest addition to the Debezium family in Joy Gao’s excellent posts about the new connector (part 1, part 2).

In case you haven’t read those yet, we’d highly recommend doing so in order to learn more about the challenges encountered when implementing a CDC connector for a distributed datastore such as Cassandra, as well as the design decisions made in order to come up with a first "minimal viable product". Joy also did a great talk at QCon last year, which touches on the topic of CDC for Cassandra.

The connector was originally developed internally at long-term Debezium user WePay; the WePay team then decided to open-source their work, put it under the Debezium umbrella, and continue to evolve it there. That’s really great news for the Debezium community! We couldn’t be happier about this contribution and look forward to evolving this new connector together in the open.

At this point the Cassandra connector is in "incubating" state, i.e. its design and implementation are still pretty much in flux, the event structure which it creates may change in future releases etc. Note that, unlike the other Debezium connectors, this one currently is not based on Kafka Connect. Instead, it is implemented as a standalone process running on Cassandra node(s) themselves. Refer to the blog posts linked above for the reasoning behind this design and possible future developments around this. Needless to say, any ideas and contributions in this area will be highly welcomed.

Together with the connector we’ve also provided an initial draft of the connector documentation; this is still work-in-progress and will be amended in the next few days.

Further New Features

The Postgres connector supports the metrics known from SQL Server and Oracle now (DBZ-777). When using the SQL Server connector, it is now ensured that tables are snapshotted in a deterministic order, as defined by the given table whitelist configuration (DBZ-1254).

There have also been two improvements to our SMTs (single message transformations):

  • The SMT for new record state extraction makes it possible to add columns for propagating metadata fields from the source block (DBZ-1395, e.g. useful to propagate the transaction into sink tables).

  • The default structure produced by the outbox routing SMT has been further streamlined (DBZ-1385); the message value will now only contain the contents of the configured outbox table payload column. In case you want to re-add the eventType value, you can configure it as an "additional field", which either goes into the message as a header (recommended) or into the message value, which will then be a nested structure as before.

Bugfixes and Other Improvements

Finally, here’s an overview of assorted bugfixes in the 0.10 Beta4 release:

  • The MySQL connector handles GRANT DELETE ON <table> statements correctly (DBZ-1411)

  • Superfluous table scans are avoided when using the initial_schema_only snapshot strategy with SQL Server (DBZ-1417)

  • The superfluous creation of connections is avoided when obtaining the xmin position of Postgres (DBZ-1381)

  • The new record state extraction SMT handles heartbeat events correctly (DBZ-1430)

Please refer to the 0.10.0.Beta4 release notes for the complete list of addressed issues and the upgrading procedure.

A big thank you goes out to all the contributors from the Debezium community who worked on this release: Joy Gao, Renato Mefi and Guillaume Rosauro!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.Beta4 Released

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts of rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

Incubating Cassandra Connector

If you have been following this blog lately, you’ll have read about the latest addition to the Debezium family in Joy Gao’s excellent posts about the new connector (part 1, part 2).

In case you haven’t read those yet, we’d highly recommend doing so in order to learn more about the challenges encountered when implementing a CDC connector for a distributed datastore such as Cassandra, as well as the design decisions made in order to come up with a first "minimal viable product". Joy also did a great talk at QCon last year, which touches on the topic of CDC for Cassandra.

The connector was originally developed internally at long-term Debezium user WePay; the WePay team then decided to open-source their work, put it under the Debezium umbrella, and continue to evolve it there. That’s really great news for the Debezium community! We couldn’t be happier about this contribution and look forward to evolving this new connector together in the open.

At this point the Cassandra connector is in "incubating" state, i.e. its design and implementation are still pretty much in flux, the event structure which it creates may change in future releases etc. Note that, unlike the other Debezium connectors, this one currently is not based on Kafka Connect. Instead, it is implemented as a standalone process running on Cassandra node(s) themselves. Refer to the blog posts linked above for the reasoning behind this design and possible future developments around this. Needless to say, any ideas and contributions in this area will be highly welcomed.

Together with the connector we’ve also provided an initial draft of the connector documentation; this is still work-in-progress and will be amended in the next few days.

Further New Features

The Postgres connector supports the metrics known from SQL Server and Oracle now (DBZ-777). When using the SQL Server connector, it is now ensured that tables are snapshotted in a deterministic order, as defined by the given table whitelist configuration (DBZ-1254).

There have also been two improvements to our SMTs (single message transformations):

  • The SMT for new record state extraction makes it possible to add columns for propagating metadata fields from the source block (DBZ-1395, e.g. useful to propagate the transaction into sink tables).

  • The default structure produced by the outbox routing SMT has been further streamlined (DBZ-1385); the message value will now only contain the contents of the configured outbox table payload column. In case you want to re-add the eventType value, you can configure it as an "additional field", which either goes into the message as a header (recommended) or into the message value, which will then be a nested structure as before.

Bugfixes and Other Improvements

Finally, here’s an overview of assorted bugfixes in the 0.10 Beta4 release:

  • The MySQL connector handles GRANT DELETE ON <table> statements correctly (DBZ-1411)

  • Superfluous table scans are avoided when using the initial_schema_only snapshot strategy with SQL Server (DBZ-1417)

  • The superfluous creation of connections is avoided when obtaining the xmin position of Postgres (DBZ-1381)

  • The new record state extraction SMT handles heartbeat events correctly (DBZ-1430)

Please refer to the 0.10.0.Beta4 release notes for the complete list of addressed issues and the upgrading procedure.

A big thank you goes out to all the contributors from the Debezium community who worked on this release: Joy Gao, Renato Mefi and Guillaume Rosauro!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/09/05/website-documentation-overhaul/index.html b/blog/2019/09/05/website-documentation-overhaul/index.html index 7ec1c805b5..3015ec36ce 100644 --- a/blog/2019/09/05/website-documentation-overhaul/index.html +++ b/blog/2019/09/05/website-documentation-overhaul/index.html @@ -1 +1 @@ - Site and Documentation Overhaul

This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.

New Releases Page

It is important that the Debezium community be able to find information easily about a given release series. We have introduced a new Releases area on the site that describes details about each release series (e.g. 0.9, the current stable release and 0.10, the current development release) such as:

  • What database or Apache Kafka (Connect) platforms were tested

  • What Java version is supported

  • How to migrate to a specific release series

  • Where to download the connectors or other series artifacts

  • What changes were introduced in that series

  • And much more…​

The goal is to make it simple and easy to find all information about a specific release series in a single place.

New Documentation

Probably one of the most limiting factors of how our documentation was published previously was that it focused solely on the latest stable version. Moreover, the documentation sources were kept separate from the actual code sources in the main code repository. This presented several drawbacks:

  • Confusing to users of older releases

  • Prevented publishing documentation for development versions

  • Caused friction for contributors when implementing new features that need documentation updates

What we felt we needed to provide the community was documentation published by version. This would allow the documentation to be tailored specifically to each version, allowing fluid changes for future versions without impacting prior versions.

Such a solution also has the benefit that it enables the Debezium team to publish development version documentation easily, which is a critical step in helping users who test unstable releases.

With Antora we found a toolchain which addresses these needs. It allows us to maintain different versions of the documentation right next to the actual code and aggregate them on the website. Going forward, Debezium documentation can be found at Reference documentation. This page allows visitors to quickly navigate to documentation for a specific version. Once in the documentation, you can quickly navigate between various Debezium versions easily. Built by the friendly folks behind AsciiDoctor, Antora comes with lots of well thought out details; e.g. there’s an "Edit this Page" link on each page, which makes it very easy to create GitHub pull requests with documentation fixes.

Finally, we also took the time to fill the long-standing placeholder pages describing the Debezium Architecture and Features with some actual content. Woohoo!

Feedback

We certainly hope these recent changes make it much easier for the community.

If something is unclear, could be improved, or, worse, a link isn’t working, we welcome your feedback. You can report such concerns to us by opening an issue for our website.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Site and Documentation Overhaul

This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.

New Releases Page

It is important that the Debezium community be able to find information easily about a given release series. We have introduced a new Releases area on the site that describes details about each release series (e.g. 0.9, the current stable release and 0.10, the current development release) such as:

  • What database or Apache Kafka (Connect) platforms were tested

  • What Java version is supported

  • How to migrate to a specific release series

  • Where to download the connectors or other series artifacts

  • What changes were introduced in that series

  • And much more…​

The goal is to make it simple and easy to find all information about a specific release series in a single place.

New Documentation

Probably one of the most limiting factors of how our documentation was published previously was that it focused solely on the latest stable version. Moreover, the documentation sources were kept separate from the actual code sources in the main code repository. This presented several drawbacks:

  • Confusing to users of older releases

  • Prevented publishing documentation for development versions

  • Caused friction for contributors when implementing new features that need documentation updates

What we felt we needed to provide the community was documentation published by version. This would allow the documentation to be tailored specifically to each version, allowing fluid changes for future versions without impacting prior versions.

Such a solution also has the benefit that it enables the Debezium team to publish development version documentation easily, which is a critical step in helping users who test unstable releases.

With Antora we found a toolchain which addresses these needs. It allows us to maintain different versions of the documentation right next to the actual code and aggregate them on the website. Going forward, Debezium documentation can be found at Reference documentation. This page allows visitors to quickly navigate to documentation for a specific version. Once in the documentation, you can quickly navigate between various Debezium versions easily. Built by the friendly folks behind AsciiDoctor, Antora comes with lots of well thought out details; e.g. there’s an "Edit this Page" link on each page, which makes it very easy to create GitHub pull requests with documentation fixes.

Finally, we also took the time to fill the long-standing placeholder pages describing the Debezium Architecture and Features with some actual content. Woohoo!

Feedback

We certainly hope these recent changes make it much easier for the community.

If something is unclear, could be improved, or, worse, a link isn’t working, we welcome your feedback. You can report such concerns to us by opening an issue for our website.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/09/10/debezium-0-10-0-cr1-released/index.html b/blog/2019/09/10/debezium-0-10-0-cr1-released/index.html index 1c2ffc29a3..2cedeb6434 100644 --- a/blog/2019/09/10/debezium-0-10-0-cr1-released/index.html +++ b/blog/2019/09/10/debezium-0-10-0-cr1-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.CR1 Released

The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

Exported Snapshots for Postgres

One capability of Postgres’ logical decoding facility that has not been leveraged by Debezium so far is the notion of exported snapshots: when a replication slot is created, a transaction using that snapshot can be started, which makes it possible to export tables from the database without taking any locks, exactly at the moment the slot is created.

Based on earlier work which allows for much more flexibility with regard to how snapshots are handled by the Debezium Postgres connector, the exported snapshot functionality can now be used via the new snapshot mode exported (DBZ-1035, DBZ-1440). We encourage you to give this a test ride and report back any issues.

Eventually, this mode should become the default snapshotting behavior, as it doesn’t require the connector to obtain any locks.
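
For illustration, here is a minimal sketch of a Postgres connector configuration using this mode; apart from snapshot.mode, all property values are placeholders which you would adapt to your own environment:

# illustrative values; snapshot.mode=exported is the relevant part
connector.class=io.debezium.connector.postgresql.PostgresConnector
database.hostname=postgres
database.port=5432
database.user=postgres
database.password=postgres
database.dbname=inventory
database.server.name=dbserver1
snapshot.mode=exported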

Bugfixes

Due to a value overflow, multiple users had reported issues when capturing temporal columns with values far out in the future, e.g. years after 3000 (DBZ-949, DBZ-1205, DBZ-1255).

While such values are perhaps not too common in typical enterprise applications (after all, who wants to have their purchase order delivery scheduled a thousand years from now), there are still several use cases working with such values, e.g. think of potentially large half-life times when modelling nuclear decay processes. So we did a larger refactoring of the code dealing with temporal values, and we are happy to report that these issues have been fixed now.

Other bugfixes concern the handling of "no-op" events in MongoDB (DBZ-1464) and the recently added propagation of source fields to outgoing messages when using the new record state extraction SMT (DBZ-1448).

Cassandra Connector

After the initial release of the Debezium connector for Cassandra, work has begun to further align it with the other Debezium connectors (unlike the relational connectors and the one for MongoDB, the Cassandra connector currently is not based on Apache Kafka Connect, but runs as a stand-alone process). The first outcome of this is that it is now configured via a properties file (similar to using Kafka Connect in standalone mode) and not via a YAML file (DBZ-1406).

The next step will be to make the aspect of message serialization configurable: while currently only Avro is supported by the connector, it will eventually support the notion of pluggable converters, allowing you to use the JSON, Avro and any other converters you may already know from Kafka Connect.

Reworked Website and Documentation

When reading this blog post, it’s hard to miss: the Debezium website has received a facelift.

Information for the current stable and development releases (0.9 and 0.10 at this time) is much easier to find now. Also, the documentation has been re-organized and is now published in a version-specific manner, i.e. you can obtain the specific documentation applying to a particular release.

The blog section of the website has been reworked, too: the main page shows an introductory snippet for the most recent posts, whereas a number of "featured" blog posts are listed on the left. These are typically earlier blog posts which explore advanced topics such as the outbox pattern in depth and which we wanted to make easier to discover and consume. We hope you like the new website and documentation structure, and that it helps you find the information you’re looking for more easily than before. If you run into any issues (formatting glitches, broken links etc.), please let us know.

Refer to the 0.10.0.CR1 release notes for the complete list of addressed issues and the upgrading procedure.

As always, many thanks to all the awesome people from the Debezium community who contributed to this release: Andrew Garrett, Bingqin Zhou, Cyril Scetbon, Guillaume Rosauro, Ivan Luzyanin, Lev Zemlyanov and Renato Mefi!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.CR1 Released

The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

Exported Snapshots for Postgres

One capability of Postgres' logical decoding facility that has not been leveraged by Debezium so far is the notion of exported snapshots: when a replication slot is created, a transaction with SNAPSHOT isolation mode can be started, which makes it possible to export tables from the database without taking any locks, exactly at the moment when the slot is created.

Based on earlier work which allows for much more flexibility with regard to how snapshots are handled by the Debezium Postgres connector, the exported snapshot functionality can now be used via the new snapshot mode exported (DBZ-1035, DBZ-1440). We encourage you to give this a test ride and report back any issues.

Eventually, this mode should become the default snapshotting behavior, as it doesn’t require the connector to obtain any locks.
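
For illustration, here is a minimal sketch of a Postgres connector configuration using this mode; apart from snapshot.mode, all property values are placeholders which you would adapt to your own environment:

# illustrative values; snapshot.mode=exported is the relevant part
connector.class=io.debezium.connector.postgresql.PostgresConnector
database.hostname=postgres
database.port=5432
database.user=postgres
database.password=postgres
database.dbname=inventory
database.server.name=dbserver1
snapshot.mode=exported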

Bugfixes

Due to a value overflow, multiple users had reported issues when capturing temporal columns with values far out in the future, e.g. years after 3000 (DBZ-949, DBZ-1205, DBZ-1255).

While such values are perhaps not too common in typical enterprise applications (after all, who wants to have their purchase order delivery scheduled a thousand years from now), there are still several use cases working with such values, e.g. think of potentially large half-life times when modelling nuclear decay processes. So we did a larger refactoring of the code dealing with temporal values, and we are happy to report that these issues have been fixed now.

Other bugfixes concern the handling of "no-op" events in MongoDB (DBZ-1464) and the recently added propagation of source fields to outgoing messages when using the new record state extraction SMT (DBZ-1448).

Cassandra Connector

After the initial release of the Debezium connector for Cassandra, work has begun to further align it with the other Debezium connectors (unlike the relational connectors and the one for MongoDB, the Cassandra connector currently is not based on Apache Kafka Connect, but runs as a stand-alone process). The first outcome of this is that it is now configured via a properties file (similar to using Kafka Connect in standalone mode) and not via a YAML file (DBZ-1406).

The next step will be to make the aspect of message serialization configurable: while currently only Avro is supported by the connector, it will eventually support the notion of pluggable converters, allowing you to use the JSON, Avro and any other converters you may already know from Kafka Connect.

Reworked Website and Documentation

When reading this blog post, it’s hard to miss: the Debezium website has received a facelift.

Information for the current stable and development releases (0.9 and 0.10 at this time) is much easier to find now. Also, the documentation has been re-organized and is now published in a version-specific manner, i.e. you can obtain the specific documentation applying to a particular release.

The blog section of the website has been reworked, too: the main page shows an introductory snippet for the most recent posts, whereas a number of "featured" blog posts are listed on the left. These are typically earlier blog posts which explore advanced topics such as the outbox pattern in depth and which we wanted to make easier to discover and consume. We hope you like the new website and documentation structure, and that it helps you find the information you’re looking for more easily than before. If you run into any issues (formatting glitches, broken links etc.), please let us know.

Refer to the 0.10.0.CR1 release notes for the complete list of addressed issues and the upgrading procedure.

As always, many thanks to all the awesome people from the Debezium community who contributed to this release: Andrew Garrett, Bingqin Zhou, Cyril Scetbon, Guillaume Rosauro, Ivan Luzyanin, Lev Zemlyanov and Renato Mefi!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/09/26/debezium-0-10-0-cr2-released/index.html b/blog/2019/09/26/debezium-0-10-0-cr2-released/index.html index 38848f1292..e7fd525d8f 100644 --- a/blog/2019/09/26/debezium-0-10-0-cr2-released/index.html +++ b/blog/2019/09/26/debezium-0-10-0-cr2-released/index.html @@ -1 +1 @@ - Debezium 0.10.0.CR2 Released

I’m very happy to announce the release of Debezium 0.10.0.CR2!

After the CR1 release we decided to do another candidate release, as not only did a good number of bug fixes come in, but the community also provided a few very useful feature implementations which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

As usual, let’s take a closer look at some of the new features and resolved bugs.

Customizable Message Keys

Being able to configure which columns of a table should go into the key of corresponding Kafka messages has been a long-standing feature request (DBZ-1015). To recap, by default the message key of Debezium’s data change events will contain the primary key column(s) of the represented table. That’s a sensible default, but sometimes more flexibility is desirable:

  • Some tables don’t have a primary key, esp. in legacy data models

  • Choosing a different message key than the primary key may facilitate stream processing applications that operate on multiple change data topics

The second case is especially of interest when building Kafka Streams applications that join multiple CDC topics: in general, topic joins can only be done if the message key is the same on both sides of the join.

For instance let’s assume we have two tables, Customer and CustomerDetails with different primary keys and a foreign key relationship from CustomerDetails to Customer. By choosing that foreign key column as the message key for customer detail change events, the two table streams could be joined without the need for re-keying the customer details topic. To do so, the new message.key.columns option can be used like so:

message.key.columns=dbserver1.inventory.customerdetails:CustomerId

Customizable message keys are supported for all the relational Debezium connectors.
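
To put the option into context, here is a rough sketch of a MySQL connector configuration using it; except for message.key.columns, which repeats the example above, all values are illustrative placeholders rather than a recommended set-up:

# illustrative values; message.key.columns is the relevant part
connector.class=io.debezium.connector.mysql.MySqlConnector
database.hostname=mysql
database.port=3306
database.user=debezium
database.password=dbz
database.server.name=dbserver1
database.whitelist=inventory
database.history.kafka.bootstrap.servers=kafka:9092
database.history.kafka.topic=schema-changes.inventory
message.key.columns=dbserver1.inventory.customerdetails:CustomerId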

Pluggable Serializers for Cassandra

From previous announcements you might remember that Debezium’s Cassandra connector is a bit different from the other ones, as it’s not based on the Kafka Connect framework. As such, until now it didn’t support the notion of configurable message serializers; Avro was the only supported message format.

As part of the ongoing efforts to align the Cassandra connector more closely with the other ones, it now allows you to configure different serializers, so you could also use JSON, Protobuf and other formats (DBZ-1405). The serializer framework from Kafka Connect is reused for that, so you can leverage all the existing serializers and configure them exactly the same way as done for any other connector.
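
As a rough sketch, and assuming the connector picks up the standard Kafka Connect converter property names (please check the connector documentation for the authoritative names), switching to JSON could look like this in the connector’s properties file:

# property names assumed to follow the Kafka Connect converter convention
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
value.converter.schemas.enable=true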

Improved handling of Postgres TOAST Columns

TOAST columns are a mechanism in Postgres for dealing with column values that exceed the page size limit (typically 8 KB). While the usage of TOAST is transparent when interacting with the database itself, this is not the case when obtaining change events via logical decoding. As TOASTed values are not stored within the physical data row itself, logical decoding does not expose the value of unchanged TOAST columns (unless the column is part of the table’s replica identity).

This situation used to be handled in different ways by the logical decoding plug-ins supported by Debezium (pgoutput, decoderbufs and wal2json), one approach being the retrieval of such columns "out of band".

Unfortunately, there’s no way of doing this safely when considering concurrent writes to such a record. So we reworked how TOAST columns are handled: if a TOAST column’s value hasn’t changed and that column isn’t part of the table’s replica identity, its value will not be contained in UPDATE or DELETE events. Instead, a configurable marker value will be exported in this case (defaulting to __debezium_unavailable_value).

This avoids the race conditions that were possible before, but of course raises the question how consumers should deal with this marker value. There are multiple possible answers to that:

  • The value could simply be ignored; for instance a consumer that writes change events to a database, may omit that column from the UPDATE statement it issues

  • When not working with dynamic updates, a trigger may be installed in a sink database, that ignores any updates that would set a column value to the marker, keeping the previous value

  • When actually requiring complete change events including any TOAST column within the Kafka change data topic itself, a stateful Kafka Streams application could be built which replaces the marker value in incoming change events with the previous column value persisted in a state store

Thinking about it, the last approach might be an interesting topic for a future blog post :-)

Bugfixes and Other Changes

Besides these feature implementations, this release contains a number of bugfixes, too:

  • When using the pgoutput logical decoding plug-in for Postgres, custom publication names are supported (DBZ-1436)

  • The Postgres connector will retry for a configurable period of time to obtain a replication slot, which can be helpful when rebalancing existing connectors in a cluster (DBZ-1426)

  • Reserved characters in column names can be replaced when using Avro as message format (DBZ-1044)

  • Default values without the time part for MySQL DATETIME columns are supported now (DBZ-1501)

  • MySQL CREATE DATABASE and CREATE TABLE statements with default character sets are supported (DBZ-1470)

Testing for MongoDB has been expanded to also cover version 4.2 (DBZ-1389), and the Postgres driver has been updated to the latest and greatest version 42.2.7 (DBZ-1462). We’re also happy to report that going forward, the Debezium container images are also available in the quay.io container registry (DBZ-1178).

Overall, not less than 30 issues were fixed in the 0.10 CR2 release. Please refer to the release notes for the complete list of addressed issues and the upgrading procedure.

This release wouldn’t be possible without all the fantastic people from the Debezium community who contributed: Bingqin Zhou, Ching Tsai, Guillaume Rosauro, Javier Holguera, Jaromir Hamala, Josh Arenberg and Taylor Rolison.

Many thanks to all of you!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10.0.CR2 Released

I’m very happy to announce the release of Debezium 0.10.0.CR2!

After the CR1 release we decided to do another candidate release, as not only did a good number of bug fixes come in, but the community also provided a few very useful feature implementations which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

As usual, let’s take a closer look at some of the new features and resolved bugs.

Customizable Message Keys

Being able to configure which columns of a table should go into the key of corresponding Kafka messages has been a long-standing feature request (DBZ-1015). To recap, by default the message key of Debezium’s data change events will contain the primary key column(s) of the represented table. That’s a sensible default, but sometimes more flexibility is desirable:

  • Some tables don’t have a primary key, esp. in legacy data models

  • Choosing a different message key than the primary key may facilitate stream processing applications that operate on multiple change data topics

The second case is especially of interest when building Kafka Streams applications that join multiple CDC topics: in general, topic joins can only be done if the message key is the same on both sides of the join.

For instance let’s assume we have two tables, Customer and CustomerDetails with different primary keys and a foreign key relationship from CustomerDetails to Customer. By choosing that foreign key column as the message key for customer detail change events, the two table streams could be joined without the need for re-keying the customer details topic. To do so, the new message.key.columns option can be used like so:

message.key.columns=dbserver1.inventory.customerdetails:CustomerId

Customizable message keys are supported for all the relational Debezium connectors.
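
To put the option into context, here is a rough sketch of a MySQL connector configuration using it; except for message.key.columns, which repeats the example above, all values are illustrative placeholders rather than a recommended set-up:

# illustrative values; message.key.columns is the relevant part
connector.class=io.debezium.connector.mysql.MySqlConnector
database.hostname=mysql
database.port=3306
database.user=debezium
database.password=dbz
database.server.name=dbserver1
database.whitelist=inventory
database.history.kafka.bootstrap.servers=kafka:9092
database.history.kafka.topic=schema-changes.inventory
message.key.columns=dbserver1.inventory.customerdetails:CustomerId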

Pluggable Serializers for Cassandra

From previous announcements you might remember that Debezium’s Cassandra connector is a bit different from the other ones, as it’s not based on the Kafka Connect framework. As such, until now it didn’t support the notion of configurable message serializers; Avro was the only supported message format.

As part of the ongoing efforts to align the Cassandra connector more closely with the other ones, it now allows you to configure different serializers, so you could also use JSON, Protobuf and other formats (DBZ-1405). The serializer framework from Kafka Connect is reused for that, so you can leverage all the existing serializers and configure them exactly the same way as done for any other connector.
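
As a rough sketch, and assuming the connector picks up the standard Kafka Connect converter property names (please check the connector documentation for the authoritative names), switching to JSON could look like this in the connector’s properties file:

# property names assumed to follow the Kafka Connect converter convention
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
value.converter.schemas.enable=true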

Improved handling of Postgres TOAST Columns

TOAST columns are a mechanism in Postgres for dealing with column values that exceed the page size limit (typically 8 KB). While the usage of TOAST is transparent when interacting with the database itself, this is not the case when obtaining change events via logical decoding. As TOASTed values are not stored within the physical data row itself, logical decoding does not expose the value of unchanged TOAST columns (unless the column is part of the table’s replica identity).

This situation used to be handled in different ways by the logical decoding plug-ins supported by Debezium (pgoutput, decoderbufs and wal2json), one approach being the retrieval of such columns "out of band".

Unfortunately, there’s no way of doing this safely when considering concurrent writes to such a record. So we reworked how TOAST columns are handled: if a TOAST column’s value hasn’t changed and that column isn’t part of the table’s replica identity, its value will not be contained in UPDATE or DELETE events. Instead, a configurable marker value will be exported in this case (defaulting to __debezium_unavailable_value).

This avoids the race conditions that were possible before, but of course raises the question how consumers should deal with this marker value. There are multiple possible answers to that:

  • The value could simply be ignored; for instance a consumer that writes change events to a database, may omit that column from the UPDATE statement it issues

  • When not working with dynamic updates, a trigger may be installed in a sink database, that ignores any updates that would set a column value to the marker, keeping the previous value

  • When actually requiring complete change events including any TOAST column within the Kafka change data topic itself, a stateful Kafka Streams application could be built which replaces the marker value in incoming change events with the previous column value persisted in a state store

Thinking about it, the last approach might be an interesting topic for a future blog post :-)

Bugfixes and Other Changes

Besides these feature implementations, this release contains a number of bugfixes, too:

  • When using the pgoutput logical decoding plug-in for Postgres, custom publication names are supported (DBZ-1436)

  • The Postgres connector will retry for a configurable period of time to obtain a replication slot, which can be helpful when rebalancing existing connectors in a cluster (DBZ-1426)

  • Reserved characters in column names can be replaced when using Avro as message format (DBZ-1044)

  • Default values without the time part for MySQL DATETIME columns are supported now (DBZ-1501)

  • MySQL CREATE DATABASE and CREATE TABLE statements with default character sets are supported (DBZ-1470)

Testing for MongoDB has been expanded to also cover version 4.2 (DBZ-1389), and the Postgres driver has been updated to the latest and greatest version 42.2.7 (DBZ-1462). We’re also happy to report that going forward, the Debezium container images are also available in the quay.io container registry (DBZ-1178).

Overall, not less than 30 issues were fixed in the 0.10 CR2 release. Please refer to the release notes for the complete list of addressed issues and the upgrading procedure.

This release wouldn’t be possible without all the fantastic people from the Debezium community who contributed: Bingqin Zhou, Ching Tsai, Guillaume Rosauro, Javier Holguera, Jaromir Hamala, Josh Arenberg and Taylor Rolison.

Many thanks to all of you!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/01/audit-logs-with-change-data-capture-and-stream-processing/index.html b/blog/2019/10/01/audit-logs-with-change-data-capture-and-stream-processing/index.html index 17c4203b06..4af14aff16 100644 --- a/blog/2019/10/01/audit-logs-with-change-data-capture-and-stream-processing/index.html +++ b/blog/2019/10/01/audit-logs-with-change-data-capture-and-stream-processing/index.html @@ -318,4 +318,4 @@ "usecase": "UPDATE VEGETABLE", "user_name": "farmermargaret" } -}

Of course, the buffer processing logic may be adjusted as per your specific requirements; for instance instead of indefinitely waiting for corresponding transaction metadata, we may also decide that it makes more sense to propagate change events unenriched after some waiting time or to raise an exception indicating the missing metadata.

In order to see whether the buffering works as expected, you could do a small experiment: modify a vegetable record using SQL directly in the database. Debezium will capture the event, but as there’s no corresponding transaction metadata provided, the event will not be forwarded to the enriched vegetables topic. If you add another vegetable using the REST API, this one also will not be propagated: although there is a metadata record for it, it’s blocked by the other change event. Only once you have inserted a metadata record for the first change’s transaction into the transaction_context_data table, both change events will be processed and sent to the output topic.
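
As a purely hypothetical sketch of that last step, assuming the metadata table keys its records by a transaction id column named transaction_id (the actual column names, apart from the usecase and user_name fields seen in the event above, depend on the example application’s schema), the unblocking insert could look roughly like this:

-- hypothetical column names and values; the transaction id would be the one
-- of the stuck change event, as reported in its source metadata
INSERT INTO transaction_context_data (transaction_id, usecase, user_name)
VALUES (12345, 'MANUAL VEGETABLE UPDATE', 'farmermargaret');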

Summary

In this blog post we’ve discussed how change data capture in combination with stream processing can be used to build audit logs in an efficient, low-overhead way. In contrast to library and trigger-based approaches, the events that form the audit trail are retrieved via CDC from the database’s transaction logs, and apart from the insertion of a single metadata record per transaction (which in similar form would be required for any kind of audit log), no overhead to OLTP transactions is incurred. Also audit log entries can be obtained when data records are subject to bulk updates or deletes, something typically not possible with library-based auditing solutions.

Additional metadata that typically should be part of an audit log can be provided by the application via a separate table, which is also captured via Debezium. With the help of Kafka Streams the actual data change events can be enriched with the data from that metadata table.

One aspect we haven’t discussed yet is querying the audit trail entries, e.g. to examine specific earlier versions of the data. To do so, the enriched change data events typically would be stored in a queryable database. Unlike with a basic data replication pipeline, not only the latest version of each record would be stored in the database in that case, but all the versions, i.e. the primary keys typically would be amended with the transaction id of each change. This would make it possible to select single data records or even joins of multiple tables to get the data valid as of a given transaction id. How this could be implemented in detail may be discussed in a future post.

Your feedback on this approach for building audit logs is very welcome; just post a comment below. To get started with your own implementation, you can check out the code in the Debezium examples repository on GitHub.

Many thanks to Chris Cranford, Hans-Peter Grahsl, Ashhar Hasan, Anna McDonald and Jiri Pechanec for their feedback while working on this post and the accompanying example code!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}

Of course, the buffer processing logic may be adjusted as per your specific requirements; for instance instead of indefinitely waiting for corresponding transaction metadata, we may also decide that it makes more sense to propagate change events unenriched after some waiting time or to raise an exception indicating the missing metadata.

In order to see whether the buffering works as expected, you could do a small experiment: modify a vegetable record using SQL directly in the database. Debezium will capture the event, but as there’s no corresponding transaction metadata provided, the event will not be forwarded to the enriched vegetables topic. If you add another vegetable using the REST API, this one also will not be propagated: although there is a metadata record for it, it’s blocked by the other change event. Only once you have inserted a metadata record for the first change’s transaction into the transaction_context_data table, both change events will be processed and sent to the output topic.
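
As a purely hypothetical sketch of that last step, assuming the metadata table keys its records by a transaction id column named transaction_id (the actual column names, apart from the usecase and user_name fields seen in the event above, depend on the example application’s schema), the unblocking insert could look roughly like this:

-- hypothetical column names and values; the transaction id would be the one
-- of the stuck change event, as reported in its source metadata
INSERT INTO transaction_context_data (transaction_id, usecase, user_name)
VALUES (12345, 'MANUAL VEGETABLE UPDATE', 'farmermargaret');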

Summary

In this blog post we’ve discussed how change data capture in combination with stream processing can be used to build audit logs in an efficient, low-overhead way. In contrast to library and trigger-based approaches, the events that form the audit trail are retrieved via CDC from the database’s transaction logs, and apart from the insertion of a single metadata record per transaction (which in similar form would be required for any kind of audit log), no overhead to OLTP transactions is incurred. Also audit log entries can be obtained when data records are subject to bulk updates or deletes, something typically not possible with library-based auditing solutions.

Additional metadata that typically should be part of an audit log can be provided by the application via a separate table, which is also captured via Debezium. With the help of Kafka Streams the actual data change events can be enriched with the data from that metadata table.

One aspect we haven’t discussed yet is querying the audit trail entries, e.g. to examine specific earlier versions of the data. To do so, the enriched change data events typically would be stored in a queryable database. Unlike with a basic data replication pipeline, not only the latest version of each record would be stored in the database in that case, but all the versions, i.e. the primary keys typically would be amended with the transaction id of each change. This would make it possible to select single data records or even joins of multiple tables to get the data valid as of a given transaction id. How this could be implemented in detail may be discussed in a future post.

Your feedback on this approach for building audit logs is very welcome; just post a comment below. To get started with your own implementation, you can check out the code in the Debezium examples repository on GitHub.

Many thanks to Chris Cranford, Hans-Peter Grahsl, Ashhar Hasan, Anna McDonald and Jiri Pechanec for their feedback while working on this post and the accompanying example code!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/02/debezium-0-10-0-final-released/index.html b/blog/2019/10/02/debezium-0-10-0-final-released/index.html index 4b1cc5db33..df3db1748b 100644 --- a/blog/2019/10/02/debezium-0-10-0-final-released/index.html +++ b/blog/2019/10/02/debezium-0-10-0-final-released/index.html @@ -1 +1 @@ - Debezium 0.10 Final Released

On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2; one exception is a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

In addition, we also removed some previously deprecated options and did some clean-up of the message structures produced by the Debezium connectors, e.g. with regard to certain type mappings. When upgrading from earlier releases, please make sure to carefully study the release notes, which discuss in detail any changed or removed functionality, as well as options which, for instance, allow you to keep the original source structure for some time.

Please refer to the original announcements for more details (Alpha1, Alpha2, Beta1, Beta2, Beta3, Beta4, CR1, CR2). Altogether, a whopping 171 issues were resolved in Debezium 0.10.

Such an effort would by no means be possible without all the fantastic people in the Debezium community. To date, almost 130 people have contributed to the main Debezium code repository, plus some more to the incubator and container image repositories. But submitting pull requests with code changes is not the only way to help; we’re equally thankful for each bug report, feature request, suggestion in the chat rooms etc.

Another great thing to observe is the growing number of blog posts, conference presentations and other material covering Debezium in one way or another. We maintain a list of Debezium-related resources on the website; if you know of other contents which should be linked there, please file a PR for adding it.

I can’t begin to express how lucky we feel about all these amazing contributions, no matter whether small or large!

Coming Next: Debezium 1.0!

With Debezium 0.10.0.Final being done, the question is: what’s next? If you thought 0.11, then we have to disappoint you — we’re finally setting course towards Debezium 1.0!

With all the community feedback we got (in part from huge deployments with hundreds of Debezium connectors), and with the clean-up changes done for 0.10, we feel that it’s finally about time for the 1.0 release and the increased expectations it brings with regard to compatibility and stability.

We don’t expect much new functionality in 1.0 compared to 0.10 (with the exception of the incubating connectors); the focus will primarily be on further bug fixing, stability and usability improvements. In the good old tradition of open source, we don’t specify any timeline other than "it’s done, when it’s done". But it should be safe to say that it will be done quicker than 0.10: going forward, we’d like to increase the release cadence and publish new minor releases more often, for sure doing fewer than the eight preview releases of 0.10. Any contributions, input on the roadmap and other feedback will be very welcome of course.

Upwards and onwards!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 0.10 Final Released

On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2; one exception is a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

In addition, we also removed some previously deprecated options and did some clean-up of the message structures produced by the Debezium connectors, e.g. with regard to certain type mappings. When upgrading from earlier releases, please make sure to carefully study the release notes, which discuss in detail any changed or removed functionality, as well as options which, for instance, allow you to keep the original source structure for some time.

Please refer to the original announcements for more details (Alpha1, Alpha2, Beta1, Beta2, Beta3, Beta4, CR1, CR2). Altogether, a whopping 171 issues were resolved in Debezium 0.10.

Such an effort would by no means be possible without all the fantastic people in the Debezium community. To date, almost 130 people have contributed to the main Debezium code repository, plus some more to the incubator and container image repositories. But submitting pull requests with code changes is not the only way to help; we’re equally thankful for each bug report, feature request, suggestion in the chat rooms etc.

Another great thing to observe is the growing number of blog posts, conference presentations and other material covering Debezium in one way or another. We maintain a list of Debezium-related resources on the website; if you know of other contents which should be linked there, please file a PR for adding it.

I can’t begin to express how lucky we feel about all these amazing contributions, no matter whether small or large!

Coming Next: Debezium 1.0!

With Debezium 0.10.0.Final being done, the question is: what’s next? If you thought 0.11, then we have to disappoint you — we’re finally setting course towards Debezium 1.0!

With all the community feedback we got (in part from huge deployments with hundreds of Debezium connectors), and with the clean-up changes done for 0.10, we feel that it’s finally about time for the 1.0 release and the increased expectations it brings with regard to compatibility and stability.

We don’t expect much new functionality in 1.0 compared to 0.10 (with the exception of the incubating connectors); the focus will primarily be on further bug fixing, stability and usability improvements. In the good old tradition of open source, we don’t specify any timeline other than "it’s done, when it’s done". But it should be safe to say that it will be done quicker than 0.10: going forward, we’d like to increase the release cadence and publish new minor releases more often, for sure doing fewer than the eight preview releases of 0.10. Any contributions, input on the roadmap and other feedback will be very welcome of course.

Upwards and onwards!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/08/handling-unchanged-postgres-toast-values/index.html b/blog/2019/10/08/handling-unchanged-postgres-toast-values/index.html index 5b56577407..d1dc637bbd 100644 --- a/blog/2019/10/08/handling-unchanged-postgres-toast-values/index.html +++ b/blog/2019/10/08/handling-unchanged-postgres-toast-values/index.html @@ -132,4 +132,4 @@ public void close() { } } -}
1 Set up a state store for storing the latest biography value per customer id
2 The actual streaming pipeline: for each message on the customers topic, apply the logic for replacing the TOAST column marker value and write the transformed message to an output topic
3 Check whether the biography value from the incoming message is the marker
4 If so, get the current biography value for the customer from the state store
5 Replace the marker value with the actual value obtained from the state store
6 If the incoming message has an actual biography value, put this to the state store

Now, if a consumer subscribes to the "enriched" topic, it will see any customer change events with the actual value of any unchanged TOAST columns, as materialized from the state store. The fact that the Debezium connector originally emitted the special marker value is fully transparent at that point.

Primary Key Changes

When a record’s primary key gets updated, Debezium will create two change events: one "delete" event using the old key and one "insert" event with the new key. When processing the second event, the stream processing application will not be able to look up the biography value stored earlier on, as it has been under the old key.

One way to address this would be to expose the original key value e.g. as a message header of the insert event. This requirement is tracked as DBZ-1531; let us know if you’d like to contribute and implement this feature.

When to Use What?

We’ve discussed different options for dealing with unchanged TOAST column values in Debezium’s data change events. Which one should be used in which case then?

Changing the replica identity to FULL is the easiest approach by far: a single configuration to the source table avoids the problem to begin with. It’s not the most efficient solution, though, and some DBAs might be reluctant to apply this setting.

When using the change events to update some kind of sink data store, it may sound attractive at first to simply omit any field with the special marker value when issuing an update. But this technique has some downsides: not all data stores and the corresponding connectors might support partial updates. Instead there might only be the option to do full updates to a record in the sink data store based on the incoming data. Even when that option exists, it might be sub-optimal. E.g. for a SQL database, a statement just with the available values may be executed. This is at odds with efficient usage of prepared statements and batching, though: as the "shape" of the data may change between two updates to the same table, the same prepared statement cannot be re-used and performance may suffer.

The trigger-based approach isn’t prone to these problems: any updates to a table will have the same number of columns, so the consumer (e.g. a sink connector) may re-use the same prepared statement and batch multiple records into a single execution. One thing to be aware of is the organizational cost associated with this approach: triggers must be installed for each affected column and be kept in sync when table structures change. This must be done individually in each sink datastore, and not all stores may have support for triggers to begin with. But where possible, triggers can be a great solution.
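
To make this a bit more concrete, here is a minimal sketch of such a trigger for the customers.biography example used in this post, assuming a Postgres sink database and the default marker value; function and trigger names are made up, and one such pair would be needed per TOAST-able column:

-- hypothetical names; keeps the previous biography whenever the marker arrives
CREATE FUNCTION keep_biography_if_unchanged() RETURNS trigger AS $$
BEGIN
  IF NEW.biography = '__debezium_unavailable_value' THEN
    NEW.biography := OLD.biography;
  END IF;
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER customers_biography_trigger
  BEFORE UPDATE ON customers
  FOR EACH ROW
  EXECUTE PROCEDURE keep_biography_if_unchanged();

With a trigger like this in place, the sink connector can keep issuing full-width UPDATE statements, and the previous value is silently preserved whenever the marker shows up.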

Finally, stream processing makes the usage of TOAST-able columns and the absence of their values in update events fully transparent to consumers. The enrichment logic is implemented in a single place, from which all the consumers of the change event stream benefit, without the need for individual solutions in each one of them. Also, it’s the only viable solution if consumers themselves are stateless and don’t have any way to materialize the last value of such column, e.g. when streaming change events to a browser via web sockets or GraphQL subscriptions. The price to pay is the overhead of maintaining and operating a separate service.

On a side note, such a stream processing application might also be provided as a configurable, ready-to-use component coming as a part of the Debezium platform. This might be useful not only for Postgres, but also when thinking about other Debezium connectors. For instance, in case of Cassandra, change events will only ever contain the updated fields; a similar mode could be envisioned for MySQL by supporting its "non full" binlog mode. In both cases, a stateful stream processing service could be used to hydrate full data change events based on earlier row state retrieved from a local state store and an incoming "patch" style change event. If you think that’d be a useful addition to Debezium, please let us know.

As always, there are no silver bullets: you should choose a solution based on your specific situation and requirements. As a starting point you can find a basic implementation of the trigger and Kafka Streams approaches in the Debezium examples repository.

Which approach would you prefer? Or perhaps you have even further alternatives in mind? Tell us about it in the comments below.

Many thanks to Dave Cramer and Jiri Pechanec for their feedback while working on this post and the accompanying example code!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}
1 Set up a state store for storing the latest biography value per customer id
2 The actual streaming pipeline: for each message on the customers topic, apply the logic for replacing the TOAST column marker value and write the transformed message to an output topic
3 Check whether the biography value from the incoming message is the marker
4 If so, get the current biography value for the customer from the state store
5 Replace the marker value with the actual value obtained from the state store
6 If the incoming message has an actual biography value, put this to the state store

Now, if a consumer subscribes to the "enriched" topic, it will see any customer change events with the actual value of any unchanged TOAST columns, as materialized from the state store. The fact that the Debezium connector originally emitted the special marker value is fully transparent at that point.

Primary Key Changes

When a record’s primary key gets updated, Debezium will create two change events: one "delete" event using the old key and one "insert" event with the new key. When processing the second event, the stream processing application will not be able to look up the biography value stored earlier on, as it has been under the old key.

One way to address this would be to expose the original key value e.g. as a message header of the insert event. This requirement is tracked as DBZ-1531; let us know if you’d like to contribute and implement this feature.

When to Use What?

We’ve discussed different options for dealing with unchanged TOAST column values in Debezium’s data change events. Which one should be used in which case then?

Changing the replica identity to FULL is the easiest approach by far: a single configuration to the source table avoids the problem to begin with. It’s not the most efficient solution, though, and some DBAs might be reluctant to apply this setting.

When using the change events to update some kind of sink data store, it may sound attractive at first to simply omit any field with the special marker value when issuing an update. But this technique has some downsides: not all data stores and the corresponding connectors might support partial updates. Instead there might only be the option to do full updates to a record in the sink data store based on the incoming data. Even when that option exists, it might be sub-optimal. E.g. for a SQL database, a statement just with the available values may be executed. This is at odds with efficient usage of prepared statements and batching, though: as the "shape" of the data may change between two updates to the same table, the same prepared statement cannot be re-used and performance may suffer.

The trigger-based approach isn’t prone to these problems: any updates to a table will have the same number of columns, so the consumer (e.g. a sink connector) may re-use the same prepared statement and batch multiple records into a single execution. One thing to be aware of is the organizational cost associated with this approach: triggers must be installed for each affected column and be kept in sync when table structures change. This must be done individually in each sink datastore, and not all stores may have support for triggers to begin with. But where possible, triggers can be a great solution.
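
To make this a bit more concrete, here is a minimal sketch of such a trigger for the customers.biography example used in this post, assuming a Postgres sink database and the default marker value; function and trigger names are made up, and one such pair would be needed per TOAST-able column:

-- hypothetical names; keeps the previous biography whenever the marker arrives
CREATE FUNCTION keep_biography_if_unchanged() RETURNS trigger AS $$
BEGIN
  IF NEW.biography = '__debezium_unavailable_value' THEN
    NEW.biography := OLD.biography;
  END IF;
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER customers_biography_trigger
  BEFORE UPDATE ON customers
  FOR EACH ROW
  EXECUTE PROCEDURE keep_biography_if_unchanged();

With a trigger like this in place, the sink connector can keep issuing full-width UPDATE statements, and the previous value is silently preserved whenever the marker shows up.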

Finally, stream processing makes the usage of TOAST-able columns and the absence of their values in update events fully transparent to consumers. The enrichment logic is implemented in a single place, from which all the consumers of the change event stream benefit, without the need for individual solutions in each one of them. Also, it’s the only viable solution if consumers themselves are stateless and don’t have any way to materialize the last value of such column, e.g. when streaming change events to a browser via web sockets or GraphQL subscriptions. The price to pay is the overhead of maintaining and operating a separate service.

On a side note, such a stream processing application might also be provided as a configurable, ready-to-use component coming as part of the Debezium platform. This might be useful not only for Postgres, but also when thinking about other Debezium connectors. For instance, in the case of Cassandra, change events will only ever contain the updated fields; a similar mode could be envisioned for MySQL by supporting its "non full" binlog mode. In both cases, a stateful stream processing service could be used to hydrate full data change events based on earlier row state retrieved from a local state store and an incoming "patch" style change event. If you think that’d be a useful addition to Debezium, please let us know.

As always, there are no silver bullets: you should choose a solution based on your specific situation and requirements. As a starting point you can find a basic implementation of the trigger and Kafka Streams approaches in the Debezium examples repository.

Which approach would you prefer? Or perhaps you have even further alternatives in mind? Tell us about it in the comments below.

Many thanks to Dave Cramer and Jiri Pechanec for their feedback while working on this post and the accompanying example code!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/17/debezium-1-0-0-beta1-released/index.html b/blog/2019/10/17/debezium-1-0-0-beta1-released/index.html index 479ce40a07..1d24ebe1de 100644 --- a/blog/2019/10/17/debezium-1-0-0-beta1-released/index.html +++ b/blog/2019/10/17/debezium-1-0-0-beta1-released/index.html @@ -1 +1 @@ - Debezium 1.0.0.Beta1 Released

History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

  • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

  • Ensure message keys have the right column order (DBZ-1507)

  • Warn of table locking problems in connector logs (DBZ-1280)

Additionally, several PostgreSQL issues were fixed to improve snapshotting of large database environments (DBZ-685) and to address specific circumstances where write-ahead logs (WAL) would continue to consume disk space (DBZ-892).

In total, this release contains 18 fixes.

Thanks to all the community members who helped make this happen: Purushotham Pushpavanthar, Jeremy Finzel, Grant Cooksey

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 1.0.0.Beta1 Released

History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

  • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

  • Ensure message keys have the right column order (DBZ-1507)

  • Warn of table locking problems in connector logs (DBZ-1280)

Additionally, several PostgreSQL issues were fixed to improve snapshotting of large database environments (DBZ-685) and to address specific circumstances where write-ahead logs (WAL) would continue to consume disk space (DBZ-892).

In total, this release contains 18 fixes.

Thanks to all the community members who helped make this happen: Purushotham Pushpavanthar, Jeremy Finzel, Grant Cooksey

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/17/debezium-newsletter-02-2019/index.html b/blog/2019/10/17/debezium-newsletter-02-2019/index.html index 7e98dcc7f5..77404c7eb0 100644 --- a/blog/2019/10/17/debezium-newsletter-02-2019/index.html +++ b/blog/2019/10/17/debezium-newsletter-02-2019/index.html @@ -1 +1 @@ - Debezium's Newsletter 02/2019

Welcome to the Debezium community newsletter, in which we share all things CDC-related, including blog posts, group discussions, and StackOverflow questions that are relevant to our user community.

Articles

There have been quite a number of blog posts about Debezium lately; here are some of the latest ones that you should not miss:

This recent blog post by Gunnar Morling discusses how Debezium combined with Kafka Streams post-processing can deliver an enriched stream of events suitable for even the most complicated of tasks, such as audit tracking.

Rod Shokrian from Varo recently blogged about their CDC solution and experiences using Debezium in conjunction with the Outbox Pattern.

Community involvement is critical to each Open Source project and Debezium is no different. Joy Gao blogs about her experience at WePay where CDC innovation brought Cassandra and Debezium together.

This slide deck by Anna McDonald showcases Debezium capturing database change events in complex architectures to emit enriched, derivative-based events across your enterprise.

There are quite a number of applicable use cases for Debezium. Dave Cramer from Crunchy Data recently blogged about his experiences using Debezium to replicate data between both a source and sink PostgreSQL environment using CDC and Apache Kafka.

Data analytics are vital across lots of industries. This post by Adrian Kreuziger discusses how Convoy used Debezium and Apache Kafka to design a low-latency data warehouse solution for the trucking industry.

You can also check out our online resources for more…​

Examples

An example is often much like a picture: it’s worth a thousand words. Debezium’s examples repository has recently undergone changes, introducing new examples and updating existing ones:

Time to upgrade

Debezium version 0.10.0.Final was released at the beginning of October. If you are using the 0.9 branch, we urge you to check out the latest major release. For details on the bug fixes, enhancements, and improvements that spanned 8 preview releases, check out the release-notes.

The Debezium team has also begun active development on the next major version, 1.0. If you want details on the bug fixes, enhancements, and improvements, you can view release-notes for more details.

Using Debezium?

We recently added a community users page to https://debezium.io. If you are a user of Debezium and would like to be included, please send us a GitHub pull request or reach out to us directly.

Getting involved

It can often be overwhelming when starting work on an existing code base. We welcome community contributions and we want to make the process of getting started extremely easy. Below is a list of open issues that are currently labeled with easy-starter, in case you want to dive in quickly.

  • Configure Avro serialization automatically when detecting link to schema registry (DBZ-59)

  • Add tests for using fallback values with default REPLICA IDENTITY (DBZ-1158)

  • Add ability to insert fields from op field in ExtractNewRecordState SMT (DBZ-1452)

  • Support CREATE TABLE …​ LIKE syntax for blacklisted source table (DBZ-1496)

  • Provide change event JSON Serde for Kafka Streams (DBZ-1533)

  • Explore SMT for Externalizing large column values (DBZ-1541)

  • Whitespaces not stripped from table.whitelist (DBZ-1546)

Opportunities

We recently tweeted about an Internship opening on the Debezium project.

  • Are you in the Czech Republic area?

  • Are you passionate about Open Source?

  • Do you think change data capture is interesting?

If you answered yes to any or all of these, then you should definitely check out the details and apply!

Feedback

We intend to publish new editions of this newsletter periodically. Should anyone have suggestions for changes or for what could be highlighted here, we welcome that feedback. You can reach out to us via any of our community channels found here.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium's Newsletter 02/2019

Welcome to the Debezium community newsletter, in which we share all things CDC-related, including blog posts, group discussions, and StackOverflow questions that are relevant to our user community.

Articles

There have been quite a number of blog posts about Debezium lately; here are some of the latest ones that you should not miss:

This recent blog post by Gunnar Morling discusses how Debezium combined with Kafka Streams post-processing can deliver an enriched stream of events suitable for even the most complicated of tasks, such as audit tracking.

Rod Shokrian from Varo recently blogged about their CDC solution and experiences using Debezium in conjunction with the Outbox Pattern.

Community involvement is critical to each Open Source project and Debezium is no different. Joy Gao blogs about her experience at WePay where CDC innovation brought Cassandra and Debezium together.

This slide deck by Anna McDonald showcases Debezium capturing database change events in complex architectures to emit enriched, derivative-based events across your enterprise.

There are quite a number of applicable use cases for Debezium. Dave Cramer from Crunchy Data recently blogged about his experiences using Debezium to replicate data between both a source and sink PostgreSQL environment using CDC and Apache Kafka.

Data analytics are vital across lots of industries. This post by Adrian Kreuziger discusses how Convoy used Debezium and Apache Kafka to design a low-latency data warehouse solution for the trucking industry.

You can also check out our online resources for more…​

Examples

An example is often much like a picture: it’s worth a thousand words. Debezium’s examples repository has recently undergone changes, introducing new examples and updating existing ones:

Time to upgrade

Debezium version 0.10.0.Final was released at the beginning of October. If you are using the 0.9 branch, we urge you to check out the latest major release. For details on the bug fixes, enhancements, and improvements that spanned 8 preview releases, check out the release-notes.

The Debezium team has also begun active development on the next major version, 1.0. If you want details on the bug fixes, enhancements, and improvements, you can view release-notes for more details.

Using Debezium?

We recently added a community users page to https://debezium.io. If you are a user of Debezium and would like to be included, please send us a GitHub pull request or reach out to us directly.

Getting involved

It can often be overwhelming when starting work on an existing code base. We welcome community contributions and we want to make the process of getting started extremely easy. Below is a list of open issues that are currently labeled with easy-starter, in case you want to dive in quickly.

  • Configure Avro serialization automatically when detecting link to schema registry (DBZ-59)

  • Add tests for using fallback values with default REPLICA IDENTITY (DBZ-1158)

  • Add ability to insert fields from op field in ExtractNewRecordState SMT (DBZ-1452)

  • Support CREATE TABLE …​ LIKE syntax for blacklisted source table (DBZ-1496)

  • Provide change event JSON Serde for Kafka Streams (DBZ-1533)

  • Explore SMT for Externalizing large column values (DBZ-1541)

  • Whitespaces not stripped from table.whitelist (DBZ-1546)

Opportunities

We recently tweeted about an Internship opening on the Debezium project.

  • Are you in the Czech Republic area?

  • Are you passionate about Open Source?

  • Do you think change data capture is interesting?

If you answered yes to any or all of these, then you should definitely check out the details and apply!

Feedback

We intend to publish new editions of this newsletter periodically. Should anyone have suggestions for changes or for what could be highlighted here, we welcome that feedback. You can reach out to us via any of our community channels found here.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/22/audit-logs-with-kogito/index.html b/blog/2019/10/22/audit-logs-with-kogito/index.html index 2c572ed934..d5538061f6 100644 --- a/blog/2019/10/22/audit-logs-with-kogito/index.html +++ b/blog/2019/10/22/audit-logs-with-kogito/index.html @@ -3,4 +3,4 @@ log-enricher_1 | 2019-10-11 10:30:46,411 INFO [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) Processing buffered change event for key {"id":106} log-enricher_1 | 2019-10-11 10:30:46,415 WARN [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) No metadata found for transaction {"transaction_id":611} log-enricher_1 | 2019-10-11 10:30:46,921 INFO [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) Processing buffered change event for key {"id":106}

Looking at the logs you can see that they actually refer to the entry we just inserted (id 106). In addition, they point to missing transaction context data that cannot be found. That is the consequence of performing the insert manually on the database level instead of going through the vegetable service: there is no corresponding data in the dbserver1.inventory.transaction_context_data Kafka topic, and thus the log enricher cannot correlate the events and thereby merge/enrich them.

Kogito to the rescue

It would be a really good feature (or a neat feature, as Gunnar said) to have some sort of admin service that could help in resolving this kind of problem. Mainly because, if such an entry is added, it will block the entire enrichment activity, as the first missing message will hold up all the others.

And here comes Kogito - a cloud-native business automation toolkit for building intelligent business applications based on battle-tested capabilities. In other words, it brings business processes and rules to bear on particular business problems. In this case the business problem is blocked log enrichment, which can lead to lost opportunities (of various types).

What Kogito helps us with is defining the logic to understand what might go wrong, what needs to be done to resolve it, and which conditions can lead to both the problem and its resolution.

In this particular case we use both processes and rules to make sure we get the context right and react to the events behind the vegetable service. To be able to spot the erroneous situations we need to monitor two topics:

  • dbserver1.inventory.vegetable - vegetable data change events

  • dbserver1.inventory.transaction_context_data - events from vegetable service with additional context data

So for that we define two business processes where each will be started based on incoming messages - from individual Kafka topics:

Vegetable events process definition
Transaction context data process definition

As illustrated above, both processes are initiated based on an incoming message. Then the logic afterwards is significantly different.

The "Transaction context data" process is responsible for just retrieving the event and pushing it into processing phase - that essentially means to insert it into the so called "working memory" that is used for rule evaluation. And at that moment it’s done.

The "Vegetable event" process starts in a similar way… it retrieves the message and then (first ignore snapshot messages in the same way as the log enricher service) will wait for a predefined amount of time (2 seconds) before matching vegetable and transaction context events. Once there is a match it will simple finish its execution. But if there is no match found it will create a user task (that’s a task that requires human actors to provide data before process can move forward).

This is done via the admin user interface (http://localhost:8085/), which allows you to easily spot such instances and work on them to fix the missing data.

Admin service UI for fixing missing transaction context data

Once the Use case and User name attributes are provided, the process will create a new transaction context event, push it to the Kafka topic and complete itself.

After the missing transaction context data event has been put on the topic the log enricher will resume its operation and you will be able to see the following lines in the log:

log-enricher_1        | 2019-10-11 10:31:00,385 INFO  [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) Processing buffered change event for key {"id":106}
-log-enricher_1        | 2019-10-11 10:31:00,389 INFO  [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) Enriched change event for key {"id":106}

With this you can easily administer the audit logs, making sure that any erroneous situations are resolved quickly and do not affect any other activities.

And if you would like to see everything in action, just watch this video:

Or try it yourself by running the audit log example.

Maciej Swiderski

Maciej is a software engineer at Red Hat where he leads the jBPM project and is also a co-founder of the Kogito project.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +log-enricher_1 | 2019-10-11 10:31:00,389 INFO [io.deb.dem.aud.enr.ChangeEventEnricher] (auditlog-enricher-c9e5d1bb-d953-42b4-8dc6-bbc328f5344f-StreamThread-1) Enriched change event for key {"id":106}

With this you can easily administer the audit logs, making sure that any erroneous situations are resolved quickly and do not affect any other activities.

And if you would like to see everything in action, just watch this video:

Or try it yourself by running the audit log example.

Maciej Swiderski

Maciej is a software engineer at Red Hat where he leads the jBPM project and is also a co-founder of the Kogito project.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/10/24/debezium-1-0-0-beta2-released/index.html b/blog/2019/10/24/debezium-1-0-0-beta2-released/index.html index e7d484313f..f583cbbf5e 100644 --- a/blog/2019/10/24/debezium-1-0-0-beta2-released/index.html +++ b/blog/2019/10/24/debezium-1-0-0-beta2-released/index.html @@ -1 +1 @@ - Debezium 1.0.0.Beta2 Released

It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

  • Support for PostgreSQL 12 (DBZ-1542)

  • Validate that the configured PostgreSQL replication slot contains no invalid characters (DBZ-1525)

  • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

  • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

  • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

  • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

Additionally the PostgreSQL connector was improved to warn users of a common situation where their configuration does not enable heartbeats and the monitored table(s) change less frequently than tables that are not monitored. This often leads to the write-ahead log (WAL) consuming additional disk space, creating a WAL backlog, as the connector only flushes LSN information to PostgreSQL if the log contains events for monitored tables. Our hope is that this will help automated tools identify the problem earlier, while also giving hints on how to avoid it.
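For reference, heartbeats can be enabled by setting the heartbeat.interval.ms option in the connector configuration; the snippet below is only a sketch, with placeholder connection values:

{
    "name": "inventory-connector",
    "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "database.hostname": "postgres",
        "database.port": "5432",
        "database.user": "postgres",
        "database.password": "postgres",
        "database.dbname": "inventory",
        "database.server.name": "dbserver1",
        "heartbeat.interval.ms": "10000"
    }
}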

In total, this release contains 13 fixes.

Thanks to all the community members who helped make this happen: Grant Cooksey, Mingcong Huang, navdeep710

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 1.0.0.Beta2 Released

It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

  • Support for PostgreSQL 12 (DBZ-1542)

  • Validate that the configured PostgreSQL replication slot contains no invalid characters (DBZ-1525)

  • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

  • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

  • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

  • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

Additionally the PostgreSQL connector was improved to warn users of a common situation where their configuration does not enable heartbeats and the monitored table(s) change less frequently than tables that are not monitored. This often leads to the write-ahead log (WAL) consuming additional disk space, creating a WAL backlog, as the connector only flushes LSN information to PostgreSQL if the log contains events for monitored tables. Our hope is that this will help automated tools identify the problem earlier, while also giving hints on how to avoid it.

In total, this release contains 13 fixes.

Thanks to all the community members who helped make this happen: Grant Cooksey, Mingcong Huang, navdeep710

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/11/14/debezium-1-0-0-beta3-released/index.html b/blog/2019/11/14/debezium-1-0-0-beta3-released/index.html index b54147d706..fc250af693 100644 --- a/blog/2019/11/14/debezium-1-0-0-beta3-released/index.html +++ b/blog/2019/11/14/debezium-1-0-0-beta3-released/index.html @@ -1 +1 @@ - Debezium 1.0.0.Beta3 Released

While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • Built against Kafka Connect 2.3.1 (DBZ-1612)

  • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

  • Standardized source information for Cassandra connector (DBZ-1408)

  • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

  • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

  • Erroneously reporting no tables captured (DBZ-1519)

  • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

  • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

  • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

  • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

  • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

  • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

Additionally there were improvements to the Docker container images to reduce their overall size and some build infrastructure improvements to apply automatic code formatting rules. Details about code formatting changes can be found in the CONTRIBUTE.md file.

In total, this release contains 27 changes.

Thanks to all the community members who helped make this happen: David Feinblum, René Kerner, Luis Garcés-Erice, Jeremy Finzel, Mike Graham, Yang Yang, Addison Higham

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 1.0.0.Beta3 Released

While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

This new Debezium release includes several notable new features, enhancements, and fixes:

  • Built against Kafka Connect 2.3.1 (DBZ-1612)

  • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

  • Standardized source information for Cassandra connector (DBZ-1408)

  • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

  • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

  • Erroneously reporting no tables captured (DBZ-1519)

  • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

  • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

  • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

  • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

  • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

  • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

Additionally there were improvements to the Docker container images to reduce their overall size and some build infrastructure improvements to apply automatic code formatting rules. Details about code formatting changes can be found in the CONTRIBUTE.md file.

In total, this release contains 27 changes.

Thanks to all the community members who helped make this happen: David Feinblum, René Kerner, Luis Garcés-Erice, Jeremy Finzel, Mike Graham, Yang Yang, Addison Higham

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/12/12/debezium-1-0-0-cr1-released/index.html b/blog/2019/12/12/debezium-1-0-0-cr1-released/index.html index 935382d7ac..cdb822b6b1 100644 --- a/blog/2019/12/12/debezium-1-0-0-cr1-released/index.html +++ b/blog/2019/12/12/debezium-1-0-0-cr1-released/index.html @@ -1 +1 @@ - Debezium 1.0.0.CR1 Released

Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

There was a number of bug fixes, too:

  • Graceful handling of an empty history topic for the MySQL connector (DBZ-1201)

  • Correct column filtering for SQL Server connector (DBZ-1617)

  • Support for ALTER TABLE …​ RENAME …​ in MySQL (DBZ-1645)

As we’re approaching the 1.0 Final release, we also took the time to check the configuration options of the different connectors for consistency. Things were already in pretty good shape due to previous work towards unification in Debezium 0.10. Only for the SQL Server and Oracle connectors, the snapshot mode "initial_schema_only" has been deprecated and will be removed in a future version. Please use "schema_only" instead, as known from the MySQL connector (DBZ-585).
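As a sketch of what that looks like for e.g. the SQL Server connector (all connection details below are placeholders):

{
    "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
    "database.hostname": "sqlserver",
    "database.port": "1433",
    "database.user": "sa",
    "database.password": "Password!",
    "database.dbname": "testDB",
    "database.server.name": "server1",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "snapshot.mode": "schema_only"
}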

Overall, this release contains 24 changes. As always, this release wouldn’t have been possible without the help from folks of the community: Cheng Pan, Collin Van Dyck, Gurnaaz Randhawa, Grzegorz Kołakowski, Ivan San Jose, Theofanis Despoudis and Thomas Deblock.

Thanks a lot to you! In total, not less than 144 people have contributed to the Debezium main code repository at this point. Perhaps we can bump this number to 200 in 2020?

Barring any unforeseen issues, this candidate release should be the only one for Debezium 1.0, and the final version should be in your hands before Christmas. So please give the CR a try and let us know how it works for you!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file + Debezium 1.0.0.CR1 Released

Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

There was a number of bug fixes, too:

  • Graceful handling of an empty history topic for the MySQL connector (DBZ-1201)

  • Correct column filtering for SQL Server connector (DBZ-1617)

  • Support for ALTER TABLE …​ RENAME …​ in MySQL (DBZ-1645)

As we’re approaching the 1.0 Final release, we also took the time to check the configuration options of the different connectors for consistency. Things were already in pretty good shape due to previous work towards unification in Debezium 0.10. Only for the SQL Server and Oracle connectors, the snapshot mode "initial_schema_only" has been deprecated and will be removed in a future version. Please use "schema_only" instead, as known from the MySQL connector (DBZ-585).

Overall, this release contains 24 changes. As always, this release wouldn’t have been possible without the help from folks of the community: Cheng Pan, Collin Van Dyck, Gurnaaz Randhawa, Grzegorz Kołakowski, Ivan San Jose, Theofanis Despoudis and Thomas Deblock.

Thanks a lot to you! In total, not less than 144 people have contributed to the Debezium main code repository at this point. Perhaps we can bump this number to 200 in 2020?

Barring any unforeseen issues, this candidate release should be the only one for Debezium 1.0, and the final version should be in your hands before Christmas. So please give the CR a try and let us know how it works for you!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/12/13/externalized-secrets/index.html b/blog/2019/12/13/externalized-secrets/index.html index a80fab59ff..e2a5294e9e 100644 --- a/blog/2019/12/13/externalized-secrets/index.html +++ b/blog/2019/12/13/externalized-secrets/index.html @@ -76,4 +76,4 @@ } ], "type": "source" -}

Please refer to the README of the tutorial example for complete instructions.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file +}

Please refer to the README of the tutorial example for complete instructions.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2019/12/18/debezium-1-0-0-final-released/index.html b/blog/2019/12/18/debezium-1-0-0-final-released/index.html index e75de88303..729c2f5cac 100644 --- a/blog/2019/12/18/debezium-1-0-0-final-released/index.html +++ b/blog/2019/12/18/debezium-1-0-0-final-released/index.html @@ -1 +1 @@ - Streaming Now- Debezium 1.0 Final Is Out

Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

Why Debezium?

One of the things making it so enjoyable to work on Debezium as a tool for change data capture is the variety of potential use cases. When presenting the project at conferences, it’s just great to see how people quickly get excited when they realize all the possibilities enabled by Debezium and CDC.

In a nutshell, Debezium is one big enabler for letting you react to changes in your data with a low latency. Or, as one conference attendee recently put it, it’s "like the observer pattern, but for your database".

Here are a few things we’ve seen Debezium being used for as an ingestion component in data streaming pipelines:

  • Replicating data from production databases to other databases and data warehouses

  • Feeding data to search services like Elasticsearch or Apache Solr

  • Updating or invalidating caches

When using Debezium with Apache Kafka and its rich ecosystem of sink connectors, setting up such integrations can be done without any coding, just by means of deploying and configuring connectors in Kafka Connect:

Data Streaming Pipeline With Debezium
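For illustration, registering e.g. the MySQL connector boils down to POSTing a configuration like the following to Kafka Connect’s /connectors REST endpoint; host names, credentials and filters below are placeholder values in the style of the Debezium tutorial, not mandated settings:

{
    "name": "inventory-connector",
    "config": {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "database.hostname": "mysql",
        "database.port": "3306",
        "database.user": "debezium",
        "database.password": "dbz",
        "database.server.id": "184054",
        "database.server.name": "dbserver1",
        "database.whitelist": "inventory",
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "schema-changes.inventory"
    }
}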

But there are many other use cases of CDC which go beyond just moving data from A to B. When adding stream processing into the picture, e.g. via Kafka Streams or Apache Flink, CDC enables you to run time-windowed streaming queries, continuously updated as your operational data changes ("what’s the aggregated order revenue per category within the last hour"). You can use CDC to build audit logs of your data, telling who changed which data items at what time. Or update denormalized views of your data, for the sake of efficient data retrieval, adhering to the CQRS pattern (Command Query Responsibility Segregation).
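As a rough sketch of such a continuously updated windowed query (all topic, field and type choices below are made up for illustration), a Kafka Streams topology could maintain the revenue per category over the last hour like so:

import java.time.Duration;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class HourlyRevenuePerCategory {

    public static void addTo(StreamsBuilder builder) {
        // assumes a stream of order change events that has been re-keyed by category,
        // with the order amount carried as a plain string value
        KTable<Windowed<String>, Double> hourlyRevenue = builder
                .stream("orders_by_category", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
                .windowedBy(TimeWindows.of(Duration.ofHours(1)))
                .aggregate(
                        () -> 0.0,
                        (category, amount, total) -> total + Double.parseDouble(amount),
                        Materialized.with(Serdes.String(), Serdes.Double()));

        // the aggregate is updated with every incoming change event and could be
        // written back to a topic or queried interactively
    }
}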

Finally, CDC can also play a vital role in microservices architectures; exchanging data between services and keeping local views of data owned by other services achieves a higher independence, without having to rely on synchronous API calls. One particularly interesting approach in this context is the outbox pattern, which is well supported by Debezium. In case you don’t start on the green field (who ever does?), CDC can be used to implement the strangler pattern for moving from a monolithic design to microservices.

You can learn more about change data capture use cases with Debezium and Apache Kafka in this presentation from QCon San Francisco.

But you don’t have to take our word for it: you can find lots of blog posts, conference talks and examples by folks using Debezium in production in our compilation of resources. If you’d like to get a glimpse of who else already is using Debezium, see our rapidly growing list of reference users (or send us a pull request to get your name added if your organization already is running Debezium in production).

Debezium 1.0

Now, let’s talk a little bit about the contents of the 1.0 release.

Streaming Now - Debezium 1.0 Final Is Out

Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

Why Debezium?

One of the things making it so enjoyable to work on Debezium as a tool for change data capture is the variety of potential use cases. When presenting the project at conferences, it’s just great to see how people quickly get excited when they realize all the possibilities enabled by Debezium and CDC.

In a nutshell, Debezium is one big enabler for letting you react to changes in your data with a low latency. Or, as one conference attendee recently put it, it’s "like the observer pattern, but for your database".

Here are a few things we’ve seen Debezium being used for as an ingestion component in data streaming pipelines:

  • Replicating data from production databases to other databases and data warehouses

  • Feeding data to search services like Elasticsearch or Apache Solr

  • Updating or invalidating caches

When using Debezium with Apache Kafka and its rich ecosystem of sink connectors, setting up such integrations can be done without any coding, just by means of deploying and configuring connectors in Kafka Connect:

Data Streaming Pipeline With Debezium
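
To make the "no coding" point concrete, here is a minimal sketch of registering a Debezium source connector with Kafka Connect; it loosely follows the MySQL connector options of this era, and all host names, credentials and database names are placeholders:

{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz",
    "database.server.id": "184054",
    "database.server.name": "dbserver1",
    "database.whitelist": "inventory",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory"
  }
}

A sink connector, e.g. for Elasticsearch or a JDBC data warehouse, is configured in the same declarative way, which is what allows the pipelines above to be assembled without writing any code.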

But there are many other use cases of CDC which go beyond just moving data from A to B. When adding stream processing into the picture, e.g. via Kafka Streams or Apache Flink, CDC enables you to run time-windowed streaming queries, continuously updated as your operational data changes ("what’s the aggregated order revenue per category within the last hour"). You can use CDC to build audit logs of your data, telling who changed which data items at what time. Or update denormalized views of your data, for the sake of efficient data retrieval, adhering to the CQRS pattern (Command Query Responsibility Segregation).

Finally, CDC can also play a vital role in microservices architectures; exchanging data between services and keeping local views of data owned by other services achieves a higher independence, without having to rely on synchronous API calls. One particularly interesting approach in this context is the outbox pattern, which is well supported by Debezium. In case you don’t start on the green field (who ever does?), CDC can be used to implement the strangler pattern for moving from a monolithic design to microservices.

You can learn more about change data capture use cases with Debezium and Apache Kafka in this presentation from QCon San Francisco.

But you don’t have to take our word for it: you can find lots of blog posts, conference talks and examples by folks using Debezium in production in our compilation of resources. If you’d like to get a glimpse of who else already is using Debezium, see our rapidly growing list of reference users (or send us a pull request to get your name added if your organization already is running Debezium in production).

Debezium 1.0

Now, let’s talk a little bit about the contents of the 1.0 release.

This version continues the effort we began in 0.10 to make sure the emitted event structures and configuration options of the connectors are correct and consistent. While we’ve always been very careful to ensure a smooth upgrading experience, you can expect even more stability in this regard going forward after the 1.0 release.

We’ve expanded the test coverage of databases (Postgres 12, SQL Server 2019, MongoDB 4.2), upgraded our container images to OpenJDK 11 and now build against the latest version of Apache Kafka (2.4.0; earlier versions continue to be supported, too). And last but not least, we’ve also fixed a large number of bugs. Overall, 96 issues were addressed in Debezium 1.0 and its preview releases (Beta1, Beta2, Beta3, CR1).

If you’re on 0.10 right now, the upgrade is mostly a drop-in replacement. When coming from earlier versions, please make sure to read the migration notes to learn about deprecated options, upgrading procedures and more.

The Most Important Part: The Debezium Community

Debezium couldn’t exist without its community of contributors and users. I can’t begin to express how grateful I am for having the chance to be a member of this fantastic community, interacting and working with folks from around the world towards our joint goal of building the leading open-source solution for change data capture.

At this point, about 150 people have contributed to the different Debezium code repositories (please let me know if I’ve missed anybody):

Aaron Rosenberg, Addison Higham, Adrian Kreuziger, Akshath Patkar, Alexander Kovryga, Amit Sela, Andreas Bergmeier, Andras Istvan Nagy, Andrew Garrett, Andrew Tongen, Andrey Pustovetov, Anton Martynov, Arkoprabho Chakraborti, artiship, Ashhar Hasan, Attila Szucs, Barry LaFond, Bartosz Miedlar, Ben Williams, Bin Li, Bingqin Zhou, Braden Staudacher, Brandon Brown, Brandon Maguire, Cheng Pan, Ching Tsai, Chris Cranford, Chris Riccomini, Christian Posta, Chuck Ha, Cliff Wheadon, Collin Van Dyck, Cyril Scetbon, David Chen, David Feinblum, David Leibovic, David Szabo, Deepak Barr, Denis Mikhaylov, Dennis Campagna, Dennis Persson, Duncan Sands, Echo Xu, Eero Koplimets, Emrul Islam, Eric S. Kreiseir, Ewen Cheslack-Postava, Felix Eckhardt, Gagan Agrawal, Grant Cooksey, Guillaume Rosauro, Gunnar Morling, Gurnaaz Randhawa, Grzegorz Kołakowski, Hans-Peter Grahsl, Henryk Konsek, Horia Chiorean, Ian Axelrod, Ilia Bogdanov, Ivan Kovbas, Ivan Lorenz, Ivan Luzyanin, Ivan San Jose, Ivan Vucina, Jakub Cechacek, Jaromir Hamala, Javier Holguera, Jeremy Finzel, Jiri Pechanec, Johan Venant, John Martin, Jon Casstevens, Jordan Bragg, Jork Zijlstra, Josh Arenberg, Josh Stanfield, Joy Gao, Jure Kajzer, Keith Barber, Kevin Pullin, Kewen Chao, Krizhan Mariampillai, Leo Mei, Lev Zemlyanov, Listman Gamboa, Liu Hanlin, Luis Garcés-Erice, Maciej Bryński, MaoXiang Pan, Mario Mueller, Mariusz Strzelecki, Matteo Capitanio, Mathieu Rozieres, Matthias Wessendorf, Mike Graham, Mincong Huang, Moira Tagle, Muhammad Sufyian, Navdeep Agarwal, Nikhil Benesch, Olavi Mustanoja, Oliver Weiler, Olivier Lemasle, Omar Al-Safi, Ori Popowski, Orr Ganani, Peng Lyu, Peter Goransson, Peter Larsson, Philip Sanetra, Pradeep Mamillapalli, Prannoy Mittal, Preethi Sadagopan, pushpavanthar, Raf Liwoch, Ram Satish, Ramesh Reddy, Randall Hauch, Renato Mefi, Roman Kuchar, Sagar Rao, René Kerner, Rich O’Connell, Robert Coup, Sairam Polavarapu, Sanjay Kr Singh, Sanne Grinovero, Satyajit Vegesna, Saulius Valatka, Scofield Xu, Sherafudheen PM, Shivam Sharma, Shubham Rawat, Stanley Shyiko, Stathis Souris, Stephen Powis, Steven Siahetiong, Syed Muhammad Sufyian, Tautvydas Januskevicius, Taylor Rolison, Theofanis Despoudis, Thomas Deblock, Tom Bentley, Tomaz Lemos Fernandes, Tony Rizko, Wang-Yu-Chao, Wei Wu, WenZe Hu, William Pursell, Willie Cheong, Wout Scheepers, Yang Yang, Zheng Wang

You’re amazing, and I would like to wholeheartedly thank each and every one of you! I’m sure our community will continue to grow in the future — I’d love it if we hit the mark of 200 contributors in 2020.

Equally important are our users; interacting with you in the chat, on the mailing list or at conferences and meet-ups is what helps to drive the direction of the project: learning about your specific requirements and use cases (or bugs you’ve run into) is vital for deciding where to put the focus next. A big thank you to you, too!

Some of you have even shared your experiences with Debezium in conference talks and blog posts. Nothing beats hearing the war stories of others and being able to learn from their experiences, so speaking about your insights around Debezium and CDC is incredibly helpful and highly appreciated!

What’s Next?

Let’s wrap up this post with a look at what’s next in store for Debezium.

After some long-overdue holidays, we’re planning to begin the work on Debezium 1.1 in January. Some of the potential features you can look forward to are:

  • Support for the CloudEvents specification as a portable event format

  • A Quarkus extension for implementing the outbox pattern

  • A stand-alone Debezium server which will let you stream data change events to messaging infrastructure such as Amazon Kinesis

  • Means of exposing transactional boundaries on a separate topic, allowing you to aggregate all the events originating from one source transaction and process them at once

  • Further progression of the incubating community-led connectors for Oracle and Apache Cassandra

Of course, this roadmap is strongly influenced by the community, i.e. you. So if you would like to see any particular items here, please let us know.

We also have some exciting blog posts in the works, e.g. on how to combine Debezium with the brand-new Kafka Connect connector for Apache Camel or how to use the recently added support for non-key joins in Kafka Streams (KIP-213) with Debezium change events.

One more thing I’m super-thrilled about is Debezium becoming a supported component of the Red Hat Integration product. Part of the current release is a Tech Preview for the change data capture connectors for MySQL, Postgres, SQL Server and MongoDB. This is great news for folks who wish to have commercial support by Red Hat for their CDC connectors.

For now, let’s celebrate the release of Debezium 1.0 and look forward to what’s coming in 2020.

Onwards and Upwards!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.1.0.Alpha1 Released

With CloudEvents, each event contains a few defined attributes such as id, source and type. The actual event payload can be found in the data attribute, which in the case of Debezium is the structure of old and new state of the affected database record. Most of the other Debezium change event attributes (op, timestamp, source metadata) are mapped to custom attributes using the iodebezium prefix.

The data attribute as well as the entire event can be encoded using JSON or Avro. Initially, only the "structured mode" of CloudEvents is supported, i.e. all the attributes are part of the event structure, which is the Kafka record value in this case. In a future release we’ll also add support for the CloudEvents "binary mode", where only the data attribute is part of the event structure, while all other attributes will be mapped to (Kafka) header attributes.
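
For illustration, a structured-mode envelope might look roughly like the following sketch; the attribute names follow the description above, while all values (and the exact shape of the extension attributes) are made up for the example and may differ from what the converter actually emits:

{
  "id": "name:dbserver1;lsn:24023128",
  "source": "/debezium/postgresql/dbserver1",
  "specversion": "1.0",
  "type": "io.debezium.postgresql.datachangeevent",
  "time": "2020-01-16T10:15:30.000Z",
  "datacontenttype": "application/json",
  "iodebeziumop": "c",
  "iodebeziumtsms": "1579169730000",
  "data": {
    "before": null,
    "after": {
      "id": 1001,
      "name": "Bob"
    }
  }
}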

CloudEvents support is under active development, so details around the format likely will change in future versions as this feature matures. We’d love to get your feedback on this and learn from your insights and experiences with CloudEvents.

Further Changes

Besides these two larger features, a number of smaller improvements and fixes have been made for Debezium 1.1 Alpha1:

  • Column value masking for Postgres, allowing you to replace sensitive column values with asterisks (DBZ-1685); see the configuration sketch after this list

  • Several fixes to the MySQL DDL parser related to trigger definitions (DBZ-1699) and the SIGNAL keyword (DBZ-1691)

  • Two bugfixes around time and precision thereof (DBZ-1688, DBZ-1707)
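
As a configuration sketch for the new masking option, assuming it follows the column.mask.with.<length>.chars naming already used by the other relational connectors (the schema, table and column names below are placeholders):

{
  "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
  "column.mask.with.12.chars": "inventory.customers.ssn"
}

With such a setting, values of the listed column are replaced by a fixed-length string of asterisks in emitted change events.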

Altogether, 17 issues were fixed for this release.

Many thanks to Andrea Cosentino, Vasily Ulianko, Vedit Firat Arig, Yongjun Du and Yuchao Wang for their contributions to this release!

Going forward, we’ll continue with further Debezium 1.1 preview releases every two to three weeks. Take a look at the roadmap to see what’s coming up, or get in touch to tell us about your specific feature requirements!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   


Outbox Quarkus Extension

Wrapping up

It is really simple and easy to set up and use the Debezium Outbox extension.

We have a complete example in our examples repository that uses the order service described here as well as a shipment service that consumes the events. For more details on the extension, refer to the Outbox Quarkus Extension documentation.

Future Plans

The current implementation of the Debezium Outbox extension works quite well, but we acknowledge there is still room for improvement. Some of the things we’ve already identified and have plans to include in future iterations of the extension are:

  • Avro serialization support for event payload

  • Full outbox table column attribute control, e.g. definition, length, precision, scale, and converters.

  • Complete outbox table customization using a user-supplied entity class.

  • Allow varied signatures of ExportedEvent within a single application.

We are currently tracking all future changes to this extension in DBZ-1711. As always we welcome any and all feedback, so feel free to let us know in that issue, on Gitter, or the mailing lists.

Chris Cranford

Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

   


Distributed Data for Microservices — Event Sourcing vs. Change Data Capture

This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

Reactive Systems

Taking a quick step back, Event Sourcing and Change Data Capture are solutions that can be used to build distributed systems (i.e. microservices) that are Reactive. Microservices should react to an ever-changing environment (i.e. the cloud) by being resilient and elastic. The magic behind these abilities is being message and event driven. To find out more, I advise you to read the Reactive Manifesto.

Figure 1. Attributes of a Reactive System, per the Reactive Manifesto

Shared Goals for Event Sourcing and Change Data Capture

The two core solutions presented in this article are Event Sourcing and Change Data Capture. Before I formally introduce these two solutions, it is worth noting that they serve similar goals, which are:

  1. Designate one datastore as the global source of truth for a specific set of data

  2. Provide a representation of past and current application state as a series of events, also called a journal or transaction log

  3. Offer a journal that can replay events, as needed, for rebuilding or refreshing state

Event Sourcing uses its own journal as the source of truth, while Change Data Capture depends on the underlying database transaction log as the source of truth. This difference has major implications for the design and implementation of software, which will be presented later in this article.

Domain Events vs. Change Events

Before we go deeper, it’s important to make a distinction about the types of events we are concerned about for Event Sourcing and Change Data Capture:

  • Domain events — An explicit event, part of your business domain, that is generated by your application. These events are usually represented in the past tense, such as OrderPlaced, or ItemShipped. These events are the primary concern for Event Sourcing.

  • Change events — Events that are generated from a database transaction log indicating what state transition has occurred. These events are of concern for Change Data Capture.

Domain events and change events are not related unless a change event happens to contain a domain event, which is a premise for the Outbox Pattern to be introduced later in the article.
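
To make the distinction concrete, here is a hand-written sketch of both kinds of events; the field names are illustrative, and the change event follows the general before/after envelope emitted by log-based CDC tools such as Debezium. A domain event:

{
  "type": "OrderPlaced",
  "orderId": 1001,
  "customerId": 123,
  "totalPrice": 39.98
}

A change event for the corresponding row insert:

{
  "op": "c",
  "before": null,
  "after": {
    "id": 1001,
    "customer_id": 123,
    "total_price": 39.98,
    "status": "ENTERED"
  },
  "source": {
    "table": "purchase_orders"
  }
}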

Now that we have established some commonality on Event Sourcing and Change Data Capture, we can go deeper.

Event Sourcing

Event Sourcing is a solution that allows software to maintain its state as a journal of domain events. As such, the journal, taken in its entirety, represents the current state of the application. Having this journal also gives the ability to easily audit the history, to time travel, and to reproduce errors generated by previous state.

Event Sourcing implementations usually have these characteristics:

  1. Domain events generated from the application business logic will add new state for your application

  2. State of the application is updated via an append-only event log (a journal) that is generally immutable

  3. Journal is considered the source of truth for the lifetime of the application

  4. Journal is replayable to rebuild the state of the application at any point in time

  5. Journal groups domain events by an ID to capture the current state of an object (an Aggregate from DDD parlance)

Figure 2. Representation of Event Sourcing Materializing an Object
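
As a concrete, hand-written sketch (not the format of any particular event store), the journal entries for a single Order aggregate might look like this; replaying them in sequence yields the aggregate’s current state:

[
  { "aggregateId": "order-1001", "sequence": 1, "type": "OrderPlaced", "payload": { "customerId": 123 } },
  { "aggregateId": "order-1001", "sequence": 2, "type": "ItemAdded", "payload": { "item": "Debezium in Action", "quantity": 2 } },
  { "aggregateId": "order-1001", "sequence": 3, "type": "OrderShipped", "payload": { "carrier": "ACME Logistics" } }
]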

Additionally, Event Sourcing implementations often have these characteristics:

  1. Snapshotting mechanism for the journal to speed up recreating the state of an application

  2. Mechanism to remove events from the journal as required (usually for compliance reasons)

  3. API for event dispatching that may be used for distributing state of the application

  4. Lack of transactional guarantees that are normally present for a strongly consistent system

  5. Backward compatibility mechanism to cope with changing event formats inside the journal

  6. Mechanism to backup and restore the journal, the source of truth for the application

Event sourcing mimics how a database works, but at the application-level. Per Figure 2, the figure could be updated to represent a database as shown in Figure 3 with roughly the same design.

Figure 3. Representation of Database Transaction Materializing a Table

The comparison between Figure 2 and Figure 3 will become more relevant as we dive deeper into how Event Sourcing and Change Data Capture compare to each other.

Change Data Capture

Change Data Capture (CDC) is a solution that captures change events from a database transaction log (or equivalent mechanism) and forwards those events to downstream consumers. CDC ultimately allows application state to be externalized and synchronized with external stores of data.

Change Data Capture implementations usually have these characteristics:

  1. External process that reads the transaction log of a database with the goal to materialize change events from those transactions

  2. Change events are forwarded to downstream consumers as messages

As you can see, CDC is a relatively simple concept with a very narrow scope. It’s simply externalizing the transaction log of the database as a stream of events to interested consumers.

Figure 4. Change Data Capture Implementation Options

CDC also gives you flexibility on how events are consumed. Per Figure 4:

  • Option 1 is a standalone CDC process to capture and forward events from the transaction log to a message broker

  • Option 2 is an embedded CDC client that sends events directly to an application

  • Option A is another connector that persists CDC events directly to a datastore

  • Option B forwards events to consuming applications via a message broker

Finally, a CDC implementation often has these characteristics:

  1. A durable message broker is used to forward events with at-least-once delivery guarantees to all consumers

  2. The ability to replay events from the datastore transaction log and/or message broker for as long as the events are persisted

CDC is very flexible and adaptable for multiple use cases. Early adopters of CDC typically chose Option 1/A, but Option 1/B and also Option 2 are becoming more popular as CDC gains momentum.

Using CDC to Implement the Outbox Pattern

The primary goal of the Outbox Pattern is to ensure that updates to the application state (stored in tables) and the publishing of the respective domain event are done within a single transaction. This involves creating an Outbox table in the database to collect those domain events as part of a transaction. Having transactional guarantees around the domain events and their propagation via the Outbox is important for data consistency across a system.

After the transaction completes, the domain events are then picked up by a CDC connector and forwarded to interested consumers using a reliable message broker (see Figure 5). Those consumers may then use the domain events to materialize their own aggregates (see above per Event Sourcing).

Figure 5. Outbox Pattern implemented with CDC (2 Options)

The Outbox is also meant to be abstracted from the application as it’s only an ephemeral store of outgoing event data, and not meant to be read or queried. In fact, the domain events residing in the Outbox may be deleted immediately after insertion!

Event Sourcing Journal vs. Outbox

We can now take a closer look at the overlap in design of an Event Sourcing journal and CDC with Outbox. By comparing the attributes of the journal with the Outbox table, the similarities become clear. The Aggregate, again from DDD, is at the heart of how the data is stored and consumed for both Outbox and Event Sourcing.

Here are the common attributes that exist between an Event Sourcing journal and an Outbox:

  • Event ID — Unique identifier for the event itself and can be used for de-duplication for idempotent consumers

  • Aggregate ID — Unique identifier used to partition related events; these events compose an Aggregate’s state

  • Aggregate Type — The type of the Aggregate that can be used for routing of events only to interested consumers

  • Sequence/Timestamp — A way to sort events to provide ordering guarantees

  • Message Payload — Contains the event data to be exchanged in a format readable by downstream consumers

The Outbox table and the Event Sourcing journal have essentially the same data format. The major difference is that the Event Sourcing journal is meant to be a permanent and immutable store of domain events, while the Outbox is meant to be highly ephemeral and only be a landing zone for domain events to be captured inside change events and forwarded to downstream consumers.
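
For example, a single Outbox entry carrying an OrderPlaced domain event might look like this; the column names are illustrative and map one-to-one to the attributes listed above, with the payload being the serialized domain event:

{
  "id": "cc74eac7-176b-44e7-8bda-413a5088ca66",
  "aggregateid": "1001",
  "aggregatetype": "Order",
  "timestamp": "2020-01-31T12:13:01Z",
  "payload": "{\"orderId\": 1001, \"customerId\": 123, \"totalPrice\": 39.98}"
}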

Command Query Responsibility Segregation

The Command Query Responsibility Segregation pattern, or CQRS for short, is commonly associated with Event Sourcing. However, Event Sourcing is not required in order to use CQRS. For example, the CQRS pattern could instead be implemented with the Outbox Pattern.

So what is CQRS anyway? It’s a pattern for creating alternative representations of data, known as projections, whose primary purpose is to serve as read-only, queryable views on some set of data. There may be multiple projections for the same set of data of interest to various clients.
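
For instance, an order-status projection could be a denormalized, read-optimized document that is updated as domain events arrive; this is just a sketch with made-up fields:

{
  "orderId": 1001,
  "customerId": 123,
  "status": "SHIPPED",
  "itemCount": 2,
  "totalPrice": 69.97,
  "lastUpdated": "2020-02-10T09:31:12Z"
}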

The Command aspect to CQRS applies to an application processing actions (Commands) and ultimately generating domain events that can be used to create state for a projection. That is one reason why CQRS is so often associated with Event Sourcing.

Another reason why CQRS pairs well with Event Sourcing is because the journal is not queryable by the application. The only viable way to query data in an event sourced system is through the projections. Keep in mind, these projections are eventually consistent. This brings flexibility but also complexity and deviation from the norm of strongly consistent views that developers may be familiar with.

Figure 6. Representation of Event Sourcing with CQRS

Figure 7. Representation of Event Sourcing with CQRS using a Message Broker

As you can see in Figure 6 and Figure 7, these are two very different interpretations of the CQRS pattern based on Event Sourcing, but the end result is the same, a queryable projection of data originating only from events.

As stated earlier, CQRS can also be paired with the Outbox Pattern, as shown in Figure 8. An advantage of this design is that there is still strong consistency within the application database but eventual consistency with the CQRS projections.

Figure 8. Representation of the Outbox Pattern with CQRS

Processing Domain Events Internally

While this article is very focused on distributing data across a system, using domain events internally for an application can also be important. Processing domain events internally is necessary for a variety of reasons, including executing business logic within the same microservice context that the event originated from. This is common practice for building event-driven applications.

With either Event Sourcing or CDC, processing domain events internally requires a dispatcher mechanism to pass the event in memory. Some examples of this would be the Vert.x EventBus, Akka Actor System, or Spring Application Events. In the case of the Outbox pattern, the event would be dispatched only after the initial Outbox transaction completes successfully.

Comparison of Attributes

This article has thrown a lot at you, so a table summarizing what has been presented so far may be beneficial:

Purpose

  • Event Sourcing: Capture state in a journal containing domain events.
  • CDC: Export Change Events from transaction log.
  • CDC + Outbox: Export domain events from an Outbox via CDC.
  • CQRS: Use domain events to generate projections of data.

Event Type

  • Event Sourcing: Domain Event
  • CDC: Change Event
  • CDC + Outbox: Domain Event embedded in Change Event
  • CQRS: Domain Event

Source of Truth

  • Event Sourcing: Journal
  • CDC: Transaction Log
  • CDC + Outbox: Transaction Log
  • CQRS: Depends on implementation

Boundary

  • Event Sourcing: Application
  • CDC: System
  • CDC + Outbox: System (CDC), Application (Outbox)
  • CQRS: Application or System

Consistency Model

  • Event Sourcing: N/A (only writing to the Journal)
  • CDC: Strongly Consistent (tables), Eventually Consistent (Change Event capture)
  • CDC + Outbox: Strongly Consistent (Outbox), Eventually Consistent (Change Event capture)
  • CQRS: Eventually Consistent

Replayability

  • Event Sourcing: Yes
  • CDC: Yes
  • CDC + Outbox: Yes
  • CQRS: Depends on implementation

Pros/Cons of Event Sourcing + CQRS

Now that we have a better handle on Event Sourcing and CQRS, let’s examine some of the pros and cons of Event Sourcing when paired with CQRS. These pros/cons take into consideration the current implementations that are available and also documented experiences from both myself and other professionals building distributed systems.

Pros for Event Sourcing with CQRS

  1. Journal is easily accessible for auditing purposes

  2. Generally performant for a high volume of write operations to the Journal

  3. Possibility to shard the Journal for a very large amount of data (depending on datastore)

Cons for Event Sourcing with CQRS

  1. All data is eventually consistent; a requirement for strongly consistent data doesn’t fit Event Sourcing and CQRS

  2. Cannot read your own writes to the journal (from a query perspective)

  3. Long term maintenance concerns around the journal and an event sourced architecture

  4. Need to write a lot of code for compensating actions for error cases

  5. No real transactional guarantees for resolving the dual writes flaw (to be covered next)

  6. Need to consider backward compatibility or migration of legacy data as the formats of events change

  7. Need to consider snapshotting the journal and the implications associated with it

  8. The talent pool of developers with experience using Event Sourcing and CQRS is virtually nonexistent

  9. Lack of use cases for Event Sourcing limits applicability

Dual Writes Risk for Event Sourcing and CQRS

One problem with Event Sourcing is that there is a possibility of failing to update the CQRS projections if there is an error in the application. This could result in missing data, and unfortunately, it may be difficult to recover that data without proper compensating actions built into the application itself. That is additional code and complexity that falls onto the developer, and it is error prone. For example, one workaround is to track a read offset that correlates to the event-sourced journal, giving replayability upon error so that the domain events can be reprocessed and the CQRS projections refreshed.
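
The offset tracking mentioned above can be as simple as persisting, per projection, the position of the last journal entry that was successfully applied (again, just a sketch):

{
  "projection": "orders-by-customer",
  "aggregateId": "order-1001",
  "lastAppliedSequence": 3
}

On an error, the projection can then be rebuilt by replaying the journal from that position onwards.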

The underlying reason for this possibility of errors is the lack of transactions for writing to both the Journal and the CQRS projections. This is what is known as “dual writes”, and it greatly increases the risk for errors. This dual writes flaw is represented in Figure 9.

Figure 9. Lack of Transactional Integrity with Event Sourcing and CQRS

Even adding a message broker, as shown in Figure 7, would not resolve the dual writes issue. With that design, you are still writing out the message to a message broker and an error could arise.

The dual writes flaw is just one example of the challenges in working with Event Sourcing and CQRS. Additionally, the long-term maintenance and Day 2 impact of having the journal as the source of truth increases risk for your application over time. Event Sourcing is also a paradigm that is unfamiliar to most engineers, and it is easy to make wrong assumptions or bad design choices that ultimately may lead to rearchitecting parts of your system.

Given the pros and cons about Event Sourcing paired with CQRS, it’s advisable to seek out alternatives before settling on this design. Your use case may fit Event Sourcing but CDC may also fit the bill.

Debezium for CDC and Outbox

Debezium is an open source CDC project supported by Red Hat that has gradually gained popularity over the past few years. Recently, Debezium added full support for the Outbox Pattern with an extension to the Quarkus Java microservice runtime.

Debezium, Quarkus, and the Outbox offer a comprehensive solution that avoids the dual writes flaw and is generally more practical for the average developer team than Event Sourcing solutions.

Figure 10. Error Handling of the Outbox Pattern with CQRS

Pros for CDC + Outbox with Debezium

  1. Source of truth stays within the application database tables and transaction log

  2. Transactional guarantees and reliable messaging greatly reduce the possibility of data loss or corruption

  3. Flexible solution that fits into a prototypical microservice architecture

  4. Simpler design is easier to maintain over the long term

  5. Can read and query your own writes

  6. Opportunity for strong consistency within the application database; eventual consistency across the remainder of the system

Cons for CDC + Outbox with Debezium

  1. Additional latency may be introduced by reading the transaction log and also going through a message broker; tuning may be required to minimize latency

  2. Quarkus, while great, is the only current option for an off-the-shelf Outbox API; you could also roll your own implementation if needed

Conclusion

Building distributed systems, even with microservices, can be very challenging. That is what makes novel solutions like Event Sourcing appealing to consider. However, CDC and Outbox using Debezium is usually a better alternative to Event Sourcing, and is compatible with the CQRS pattern to boot. While Event Sourcing may still have value in some use cases, I encourage you to give Debezium and the Outbox a try first.

Eric Murphy

Eric is a Senior Architect at Red Hat where he leads consulting projects for application development and OpenShift. Eric resides near Seattle.

   


\ No newline at end of file + Distributed Data for Microservices — Event Sourcing vs. Change Data Capture

This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted therefore resulting in disastrous consequences for mission critical systems.

Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

Reactive Systems

Taking a quick step back, Event Sourcing and Change Data Capture are solutions that can be used to build distributed systems (i.e. microservices) that are Reactive. Microservices should react to an ever-changing environment (i.e. the cloud) by being resilient and elastic. The magic behind these abilities is being message and event driven. To find out more, I advise you to read the Reactive Manifesto.

Figure 1. Attributes of a Reactive System, per the Reactive Manifesto

Shared Goals for Event Sourcing and Change Data Capture

The two core solutions presented in this article are Event Sourcing and Change Data Capture. Before I formally introduce these two solutions, it can be known that they serve similar goals, which are:

  1. Designate one datastore as the global source of truth for a specific set of data

  2. Provide a representation of past and current application state as a series of events, also called a journal or transaction log

  3. Offer a journal that can replay events, as needed, for rebuilding or refreshing state

Event Sourcing uses its own journal as the source of truth, while Change Data Capture depends on the underlying database transaction log as the source of truth. This difference has major implications on the design and implementation of software which will be presented later in this article.

Domain Events vs. Change Events

Before we go deeper, it’s important to make a distinction about the types of events we are concerned about for Event Sourcing and Change Data Capture:

  • Domain events — An explicit event, part of your business domain, that is generated by your application. These events are usually represented in the past tense, such as OrderPlaced, or ItemShipped. These events are the primary concern for Event Sourcing.

  • Change events — Events that are generated from a database transaction log indicating what state transition has occurred. These events are of concern for Change Data Capture.

Domain events and change events are not related unless a change event happens to contain a domain event, which is a premise for the Outbox Pattern to be introduced later in the article.

Now that we have established some commonality on Event Sourcing and Change Data Capture, we can go deeper.

Event Sourcing

Event Sourcing is a solution that allows software to maintain its state as a journal of domain events. As such, taking the journal in its entirety represents the current state of the application. Having this journal also gives the ability to easily audit the history and also to time travel and reproduce errors generated by previous state.

Event Sourcing implementations usually have these characteristics:

  1. Domain events generated from the application business logic will add new state for your application

  2. State of the application is updated via an append-only event log (a journal) that is generally immutable

  3. Journal is considered the source of truth for the lifetime of the application

  4. Journal is replayable to rebuild the state of the application at any point in time

  5. Journal groups domain events by an ID to capture the current state of an object (an Aggregate from DDD parlance)

Figure 2. Representation of Event Sourcing Materializing an Object

Additionally, Event Sourcing implementations often have these characteristics:

  1. Snapshotting mechanism for the journal to speed up recreating the state of an application

  2. Mechanism to remove events from the journal as required (usually for compliance reasons)

  3. API for event dispatching that may be used for distributing state of the application

  4. Lack of transactional guarantees that are normally present for a strongly consistent system

  5. Backward compatibility mechanism to cope with changing event formats inside the journal

  6. Mechanism to backup and restore the journal, the source of truth for the application

Event sourcing mimics how a database works, but at the application-level. Per Figure 2, the figure could be updated to represent a database as shown in Figure 3 with roughly the same design.

Figure 3. Representation of Database Transaction Materializing a Table

The comparison between Figure 2 and Figure 3 will become more relevant as we dive deeper into how Event Sourcing and Change Data Capture compare to each other.

Change Data Capture

Change Data Capture (CDC) is a solution that captures change events from a database transaction log (or equivalent mechanism) and forwards those events to downstream consumers. CDC ultimately allows application state to be externalized and synchronized with external stores of data.

Change Data Capture implementations usually have these characteristics:

  1. External process that reads the transaction log of a database with the goal to materialize change events from those transactions

  2. Change events are forwarded to downstream consumers as messages

As you can see, CDC is a relatively simple concept with a very narrow scope. It’s simply externalizing the transaction log of the database as a stream of events to interested consumers.

Figure 4. Change Data Capture Implementation Options

CDC also gives you flexibility on how events are consumed. Per Figure 4:

  • Option 1 is a standalone CDC process to capture and forward events from the transaction log to a message broker

  • Option 2 is an embedded CDC client that sends events directly to an application

  • Option A is another connector that persists CDC events directly to a datastore

  • Option B forwards events to consuming applications via a message broker

Finally, a CDC implementation often has these characteristics:

  1. A durable message broker is used to forward events with at-least-once delivery guarantees to all consumers

  2. The ability to replay events from the datastore transaction log and/or message broker for as long as the events are persisted

CDC is very flexible and adaptable to multiple use cases. Early adopters of CDC typically chose Option 1/A, but Option 1/B and also Option 2 are becoming more popular as CDC gains momentum.
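
As a rough illustration of Option 2, the sketch below embeds a CDC client directly in a Java application using Debezium’s embedded engine; the connector properties are placeholders for a hypothetical Postgres database, and the exact builder API and property names depend on the Debezium version, so treat this as a sketch rather than a reference.

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class EmbeddedCdcExample {

    public static void main(String[] args) {
        // Connector and connection settings are placeholders; adjust them for your database.
        Properties props = new Properties();
        props.setProperty("name", "embedded-cdc");
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("database.server.name", "app");

        // Change events are handed to the application in memory; no message broker is involved.
        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value()))
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);   // runs until engine.close() is called at shutdown
    }
}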

Using CDC to Implement the Outbox Pattern

The primary goal of the Outbox Pattern is to ensure that updates to the application state (stored in tables) and publishing of the respective domain event is done within a single transaction. This involves creating an Outbox table in the database to collect those domain events as part of a transaction. Having transactional guarantees around the domain events and their propagation via the Outbox is important for data consistency across a system.

After the transaction completes, the domain events are then picked up by a CDC connector and forwarded to interested consumers using a reliable message broker (see Figure 5). Those consumers may then use the domain events to materialize their own aggregates (see above per Event Sourcing).

Figure 5. Outbox Pattern implemented with CDC (2 Options)

The Outbox is also meant to be abstracted from the application as it’s only an ephemeral store of outgoing event data, and not meant to be read or queried. In fact, the domain events residing in the Outbox may be deleted immediately after insertion!
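
A rough JDBC sketch of that single transaction is shown below; the table and column names follow the attributes discussed in this article and are assumptions, not a schema mandated by any tool.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.UUID;
import javax.sql.DataSource;

public class OutboxWriteExample {

    private final DataSource dataSource;

    public OutboxWriteExample(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    public void placeOrder(UUID orderId, String orderJson) throws SQLException {
        try (Connection con = dataSource.getConnection()) {
            con.setAutoCommit(false);
            try (PreparedStatement order = con.prepareStatement(
                         "INSERT INTO purchase_order (id, payload) VALUES (?, ?)");
                 PreparedStatement outbox = con.prepareStatement(
                         "INSERT INTO outbox (id, aggregate_type, aggregate_id, type, payload) "
                                 + "VALUES (?, ?, ?, ?, ?)")) {

                // 1. Update the application state (the table the service owns).
                order.setString(1, orderId.toString());
                order.setString(2, orderJson);
                order.executeUpdate();

                // 2. Record the domain event in the Outbox, in the same transaction.
                outbox.setString(1, UUID.randomUUID().toString());  // event id
                outbox.setString(2, "Order");                       // aggregate type
                outbox.setString(3, orderId.toString());            // aggregate id
                outbox.setString(4, "OrderPlaced");                 // event type
                outbox.setString(5, orderJson);                     // event payload
                outbox.executeUpdate();

                con.commit();
            } catch (SQLException e) {
                con.rollback();
                throw e;
            }
        }
    }
}

Because the CDC connector reads the domain event from the transaction log rather than from the table itself, the Outbox row could even be deleted again within the same transaction, as noted above.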

Event Sourcing Journal vs. Outbox

We can now take a closer look at the overlap in design of an Event Sourcing journal and CDC with Outbox. By comparing the attributes of the journal with the Outbox table, the similarities become clear. The Aggregate, again from DDD, is at the heart of how the data is stored and consumed for both Outbox and Event Sourcing.

Here are the common attributes that exist between an Event Sourcing journal and an Outbox:

  • Event ID — Unique identifier for the event itself; it can be used by idempotent consumers for de-duplication

  • Aggregate ID — Unique identifier used to partition related events; these events compose an Aggregate’s state

  • Aggregate Type — The type of the Aggregate; it can be used to route events only to interested consumers

  • Sequence/Timestamp — A way to sort events to provide ordering guarantees

  • Message Payload — Contains the event data to be exchanged in a format readable by downstream consumers

The Outbox table and the Event Sourcing journal have essentially the same data format. The major difference is that the Event Sourcing journal is meant to be a permanent and immutable store of domain events, while the Outbox is meant to be highly ephemeral and only be a landing zone for domain events to be captured inside change events and forwarded to downstream consumers.
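
Since change events are typically delivered at least once, consumers on either side of this comparison de-duplicate on the Event ID; the sketch below shows one hypothetical, in-memory way to do that (a real consumer would persist the processed ids alongside its own state).

import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

public class IdempotentConsumerExample {

    // Ids of events that have already been applied; persist these in production.
    private final Set<UUID> processed = ConcurrentHashMap.newKeySet();

    public void onEvent(UUID eventId, String payload, Consumer<String> handler) {
        // Skip events we have seen before; at-least-once delivery makes duplicates possible.
        if (processed.add(eventId)) {
            handler.accept(payload);
        }
    }
}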

Command Query Responsibility Segregation

The Command Query Responsibility Segregation pattern, or CQRS for short, is commonly associated with Event Sourcing. However, Event Sourcing is not required to use CQRS. For example, the CQRS pattern could instead be implemented with the Outbox Pattern.

So what is CQRS anyway? It’s a pattern for creating alternative representations of data, known as projections, whose primary purpose is to serve as read-only, queryable views of some set of data. There may be multiple projections over the same set of data, each of interest to different clients.

The Command aspect of CQRS refers to an application processing actions (Commands) and ultimately generating domain events that can be used to build state for a projection. That is one reason why CQRS is so often associated with Event Sourcing.

Another reason why CQRS pairs well with Event Sourcing is that the journal is not queryable by the application. The only viable way to query data in an event sourced system is through the projections. Keep in mind, these projections are eventually consistent. This brings flexibility but also complexity, and a deviation from the strongly consistent views that developers may be familiar with.

Figure 6. Representation of Event Sourcing with CQRS

Figure 7. Representation of Event Sourcing with CQRS using a Message Broker

As you can see in Figure 6 and Figure 7, these are two very different interpretations of the CQRS pattern based on Event Sourcing, but the end result is the same: a queryable projection of data originating only from events.

As stated earlier, CQRS can also be paired with the Outbox Pattern, as shown in Figure 8. An advantage of this design is that the application database remains strongly consistent, while the CQRS projections are eventually consistent.

Figure 8. Representation of the Outbox Pattern with CQRS
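
For a flavour of what a projection looks like in code, the sketch below applies incoming domain events to a simple read model keyed by Aggregate ID; the event types and fields are hypothetical, and the projection is deliberately naive, in-memory, and eventually consistent.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

public class OrderSummaryProjection {

    public record OrderSummary(UUID orderId, String status, long total) { }

    // The queryable, read-only view; clients query this instead of the journal or Outbox.
    private final Map<UUID, OrderSummary> view = new ConcurrentHashMap<>();

    // Invoked for every domain event received from the dispatcher or the message broker.
    public void on(UUID aggregateId, String eventType, long amount) {
        view.compute(aggregateId, (id, current) -> switch (eventType) {
            case "OrderPlaced"    -> new OrderSummary(id, "PLACED", amount);
            case "OrderPaid"      -> new OrderSummary(id, "PAID", current == null ? amount : current.total());
            case "OrderCancelled" -> current == null ? null : new OrderSummary(id, "CANCELLED", current.total());
            default               -> current;  // unknown events leave the view untouched
        });
    }

    public OrderSummary query(UUID orderId) {
        return view.get(orderId);
    }
}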

Processing Domain Events Internally

While this article is very focused on distributing data across a system, using domain events internally within an application can also be important. Processing domain events internally is necessary for a variety of reasons, including executing business logic within the same microservice context in which the event originated. This is common practice for building event-driven applications.

With either Event Sourcing or CDC, processing domain events internally requires a dispatcher mechanism to pass the event in memory. Some examples of this would be the Vert.x EventBus, Akka Actor System, or Spring Application Events. In the case of the Outbox pattern, the event would be dispatched only after the initial Outbox transaction completes successfully.
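
As a minimal illustration of such a dispatcher, not tied to Vert.x, Akka, or Spring, the sketch below hands domain events to in-process subscribers; with the Outbox pattern, dispatch(...) would be called only after the Outbox transaction has committed.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;

public class InMemoryEventDispatcher<E> {

    private final List<Consumer<E>> subscribers = new CopyOnWriteArrayList<>();

    public void subscribe(Consumer<E> subscriber) {
        subscribers.add(subscriber);
    }

    // Call this only after the surrounding database transaction has committed successfully.
    public void dispatch(E event) {
        subscribers.forEach(s -> s.accept(event));
    }
}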

Comparison of Attributes

This article has thrown a lot at you, so a summary comparing the four approaches attribute by attribute may be beneficial:

Purpose
  • Event Sourcing: Capture state in a journal containing domain events.
  • CDC: Export change events from the transaction log.
  • CDC + Outbox: Export domain events from an Outbox via CDC.
  • CQRS: Use domain events to generate projections of data.

Event Type
  • Event Sourcing: Domain Event
  • CDC: Change Event
  • CDC + Outbox: Domain Event embedded in a Change Event
  • CQRS: Domain Event

Source of Truth
  • Event Sourcing: Journal
  • CDC: Transaction Log
  • CDC + Outbox: Transaction Log
  • CQRS: Depends on implementation

Boundary
  • Event Sourcing: Application
  • CDC: System
  • CDC + Outbox: System (CDC), Application (Outbox)
  • CQRS: Application or System

Consistency Model
  • Event Sourcing: N/A (only writing to the Journal)
  • CDC: Strongly Consistent (tables), Eventually Consistent (Change Event capture)
  • CDC + Outbox: Strongly Consistent (Outbox), Eventually Consistent (Change Event capture)
  • CQRS: Eventually Consistent

Replayability
  • Event Sourcing: Yes
  • CDC: Yes
  • CDC + Outbox: Yes
  • CQRS: Depends on implementation

Pros/Cons of Event Sourcing + CQRS

Now that we have a better handle on Event Sourcing and CQRS, let’s examine some of the pros and cons of Event Sourcing when paired with CQRS. These pros/cons take into consideration the current implementations that are available and also documented experiences from both myself and other professionals building distributed systems.

Pros for Event Sourcing with CQRS

  1. Journal is easily accessible for auditing purposes

  2. Generally performant for a high volume of write operations to the Journal

  3. Possibility to shard the Journal for a very large amount of data (depending on datastore)

Cons for Event Sourcing with CQRS

  1. Everything is eventually consistent data; a requirement of strongly consistent data doesn’t fit Event Sourcing and CQRS

  2. Cannot read your own writes to the journal (from a query perspective)

  3. Long term maintenance concerns around the journal and an event sourced architecture

  4. Need to write a lot of code for compensating actions for error cases

  5. No real transactional guarantees for resolving the dual writes flaw (to be covered next)

  6. Need to consider backward compatibility or migration of legacy data as the formats of events change

  7. Need to consider snapshotting the journal and the implications associated with it

  8. Talent pool for developers with experience using Event Sourcing and CQRS is virtually nonexistent

  9. Lack of use cases for Event Sourcing limits applicability

Dual Writes Risk for Event Sourcing and CQRS

One problem with Event Sourcing is the possibility of failing to update the CQRS projections when the application hits an error. This could result in missing data, and unfortunately it may be difficult to recover that data without proper compensating actions built into the application itself. That is additional code and complexity that falls onto the developer, and it is error prone. For example, one workaround is to track a read offset that correlates to the event sourced journal, giving replayability upon error so the domain events can be reprocessed and the CQRS projections refreshed.
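
That offset-tracking workaround can be as small as persisting, per projection, the sequence number of the last journal entry that was successfully applied; a hypothetical sketch:

public class ProjectionOffsetTracker {

    private long lastAppliedSequence = -1;   // persist this next to the projection in production

    // Returns true if the event should be applied; already-seen events are skipped on replay.
    public boolean shouldApply(long journalSequence) {
        return journalSequence > lastAppliedSequence;
    }

    // Record progress only after the projection update has succeeded.
    public void markApplied(long journalSequence) {
        lastAppliedSequence = journalSequence;
    }
}

On an error, the projection is rebuilt by replaying the journal from the last applied sequence onwards; this restores the data, but it is exactly the kind of compensating machinery the development team now has to own.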

The underlying reason for this possibility of errors is the lack of transactions for writing to both the Journal and the CQRS projections. This is what is known as “dual writes”, and it greatly increases the risk for errors. This dual writes flaw is represented in Figure 9.

Figure 9. Lack of Transactional Integrity with Event Sourcing and CQRS

Even adding a message broker, as shown in Figure 7, would not resolve the dual writes issue. With that design, you are still writing the message to a message broker and an error could arise.

The dual writes flaw is just one example of the challenges of working with Event Sourcing and CQRS. Additionally, the long term maintenance and Day 2 impact of having the journal as the source of truth increases risk for your application over time. Event sourcing is also a paradigm that is unfamiliar to most engineers, and it is easy to make wrong assumptions or bad design choices that may ultimately lead to rearchitecting parts of your system.

Given the pros and cons of Event Sourcing paired with CQRS, it’s advisable to seek out alternatives before settling on this design. Your use case may fit Event Sourcing, but CDC may also fit the bill.

Debezium for CDC and Outbox

Debezium is an open source CDC project supported by Red Hat that has gradually gained popularity over the past few years. Recently, Debezium added full support for the Outbox Pattern with an extension to the Quarkus Java microservice runtime.

Debezium, Quarkus, and the Outbox offer a comprehensive solution that avoids the dual writes flaw and is generally more practical for the average development team than Event Sourcing solutions.

Figure 10. Error Handling of the Outbox Pattern with CQRS

Pros for CDC + Outbox with Debezium

  1. Source of truth stays within the application database tables and transaction log

  2. Transactional guarantees and reliable messaging greatly reduce possibility for data loss or corruption

  3. Flexible solution that fits into a prototypical microservice architecture

  4. Simpler design is easier to maintain over the long term

  5. Can read and query your own writes

  6. Opportunity for strong consistency within the application database; eventual consistency across the remainder of the system

Cons for CDC + Outbox with Debezium

  1. Additional latency may be introduced by reading the transaction log and going through a message broker; tuning may be required to minimize latency

  2. Quarkus, while great, is currently the only option for an off-the-shelf Outbox API; you could also roll your own implementation if needed (see the sketch below)
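
If you do roll your own Outbox API, the contract can be as small as the interface sketched below; the names mirror the attributes discussed earlier in this article and are illustrative, they are not the actual types of the Quarkus extension.

import java.time.Instant;
import java.util.UUID;

// A minimal, hypothetical contract for events a service wants to export via its Outbox.
public interface OutboxEvent {

    UUID aggregateId();      // which Aggregate the event belongs to

    String aggregateType();  // e.g. "Order"; used for routing to interested consumers

    String type();           // the domain event type, e.g. "OrderPlaced"

    Instant timestamp();     // when the event occurred

    String payload();        // serialized event data, e.g. JSON
}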

Conclusion

Building distributed systems, even with microservices, can be very challenging. That is what makes novel solutions like Event Sourcing appealing to consider. However, CDC and Outbox using Debezium is usually a better alternative to Event Sourcing, and is compatible with the CQRS pattern to boot. While Event Sourcing may still have value in some use cases, I encourage you to give Debezium and the Outbox a try first.

Eric Murphy

Eric is a Senior Architect at Red Hat where he leads consulting projects for application development and OpenShift. Eric resides near Seattle.

   


About Debezium

Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

\ No newline at end of file diff --git a/blog/2020/02/11/debezium-1-1-beta1-released/index.html b/blog/2020/02/11/debezium-1-1-beta1-released/index.html index 1dd9ce56c0..916c4eccc5 100644 --- a/blog/2020/02/11/debezium-1-1-beta1-released/index.html +++ b/blog/2020/02/11/debezium-1-1-beta1-released/index.html @@ -19,4 +19,4 @@ "event_count": "10" } ] -}

END events contain the total number of change events originating from this transaction, as well as the number of events per affected table. The actual data change events in the topics for the purchase order and order line tables contain the transaction id, too. These two things together enable a stream processing application to buffer all the change events originating from one transaction. Only once it has received all the events of a transaction should it produce the final aggregate view and publish it to downstream consumers, avoiding the issue of exposing intermediary aggregate views.
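
A rough sketch of such a buffering consumer is shown below; it assumes that each data change event carries its transaction id and that the END event carries the total event count, as described above, while the event shapes and the aggregation step itself are illustrative.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TransactionBuffer {

    public record DataChangeEvent(String txId, String table, String payload) { }

    private final Map<String, List<DataChangeEvent>> open = new HashMap<>();
    private final Map<String, Long> expectedCounts = new HashMap<>();

    // Called for every data change event from the purchase order / order line topics.
    public void onDataChangeEvent(DataChangeEvent event) {
        open.computeIfAbsent(event.txId(), id -> new ArrayList<>()).add(event);
        maybeEmit(event.txId());
    }

    // Called when an END event arrives on the transaction metadata topic.
    public void onTransactionEnd(String txId, long eventCount) {
        expectedCounts.put(txId, eventCount);
        maybeEmit(txId);
    }

    private void maybeEmit(String txId) {
        Long expected = expectedCounts.get(txId);
        List<DataChangeEvent> buffered = open.get(txId);
        if (expected != null && buffered != null && buffered.size() == expected) {
            publishAggregate(buffered);          // only now is the aggregate complete
            open.remove(txId);
            expectedCounts.remove(txId);
        }
    }

    private void publishAggregate(List<DataChangeEvent> events) {
        // Build the final aggregate view from all events of the transaction
        // and publish it downstream; intermediary views are never exposed.
    }
}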

Stay tuned for an in-depth example of such an implementation coming soon!

IBM Db2 Connector

Support for the IBM Db2 database has been on the wishlist for many Debezium users for quite some time. That’s why we were very excited when a group of IBM engineers reached out to us a while ago, offering to implement this connector under the Debezium umbrella. This connector is released in "incubating" state in Debezium 1.1.

Note that at this point a license for the IBM IIDR product is required in order to use the "ASN Capture" API leveraged by the connector. A post with more details about this connector should follow soon; in the meantime, please refer to the connector reference documentation to learn more.

Further Changes and Bugfixes

Besides the transaction metadata topic and the Db2 connector, a few more improvements and fixes have been completed for Debezium 1.1 Beta1:

  • The message transformation for extracting the after state from change events now allows routing change events to a specific topic based on a configurable record field (DBZ-1715)

  • The ExtractNewDocumentState SMT to be used with the Debezium MongoDB connector now converts Date and Timestamp fields into the org.apache.kafka.connect.data.Timestamp logical type, clarifying its semantics (DBZ-1717)

  • The MySQL connector won’t log the database password at DEBUG level any longer (DBZ-1748)

  • The Antlr DDL parser of the MySQL connector handles the TRANSACTIONAL keyword of MariaDB (DBZ-1733) as well as the GET DIAGNOSTICS statement (DBZ-1740)

  • The Postgres connector can be used with proxied connections (DBZ-1738)

Overall, 27 issues were fixed for this release. 16 bugfixes from 1.1 Alpha1 and Beta1 were backported to 1.0.1.Final. Please make sure to read the upgrade notes when upgrading the Postgres connector and the accompanying decoderbufs logical decoding plugin to 1.1 Beta1, as a specific order of upgrading the two is needed.

On our road towards Debezium 1.1, we’ll likely do another Beta release before going to the candidate release phase in a few weeks from now. To see what’s coming, take a look at the roadmap, or get in touch to tell us about your specific feature requirements!

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   



\ No newline at end of file diff --git a/blog/2020/02/13/debezium-1-1-beta2-released/index.html b/blog/2020/02/13/debezium-1-1-beta2-released/index.html index c047f419a4..70bfcc32e5 100644 --- a/blog/2020/02/13/debezium-1-1-beta2-released/index.html +++ b/blog/2020/02/13/debezium-1-1-beta2-released/index.html @@ -1,4 +1,4 @@ Debezium 1.1.0.Beta2 Released

Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

Testcontainers Support

When setting up CDC pipelines, you should test your configuration thoroughly: the source database must be correctly configured (think, for example, of MySQL’s binlog mode), connectors must use the right credentials, filters, and more.

Automation is king, and thus we’re very excited about the new support for writing CDC integration tests using Testcontainers (DBZ-1722). With just a few lines of code, you can set up all the required services using Linux containers, deploy a Debezium connector and run assertions against the emitted change data events.

We’re planning to publish a blog post dedicated to this topic very soon; in the meantime, refer to the documentation to learn more.
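
To give a rough idea of the shape such a test setup takes, the sketch below wires up the required services with plain Testcontainers classes; it does not use the new Debezium-specific Testcontainers API described in the documentation, and the image names and versions are assumptions.

import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;

public class CdcIntegrationTestSetup {

    public static void main(String[] args) {
        Network network = Network.newNetwork();

        KafkaContainer kafka = new KafkaContainer();
        kafka.withNetwork(network);
        kafka.withNetworkAliases("kafka");

        PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11");
        postgres.withNetwork(network);
        postgres.withNetworkAliases("postgres");

        GenericContainer<?> connect = new GenericContainer<>("debezium/connect:1.1");
        connect.withNetwork(network);
        connect.withExposedPorts(8083);
        connect.withEnv("BOOTSTRAP_SERVERS", "kafka:9092");
        connect.withEnv("GROUP_ID", "1");
        connect.withEnv("CONFIG_STORAGE_TOPIC", "connect_configs");
        connect.withEnv("OFFSET_STORAGE_TOPIC", "connect_offsets");
        connect.dependsOn(kafka, postgres);

        kafka.start();
        postgres.start();
        connect.start();

        // From here, a test would register a Debezium connector via Kafka Connect's REST API
        // on connect.getMappedPort(8083), insert rows into Postgres, and assert on the
        // change events read back from Kafka.
    }
}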

More Configuration Options for the Quarkus Outbox Pattern Extension

The Quarkus extension for implementing the outbox pattern comes in handy if a service needs to update its own database as well as send events to external consumers: by writing events into an "outbox" table and capturing them from there using Debezium, unsafe "dual writes" to a database and Apache Kafka are avoided.

To offer more flexibility, the Quarkus extension now allows for fully flexible customization of the outbox table’s column types (DBZ-1711). E.g. you can set the option quarkus.debezium-outbox.payload.column-definition to JSONB NOT NULL, in order to use a Postgres JSONB column for the outbox table’s payload column.

More Flexible After State Extraction

The SMT for extracting the after state of change events allows for a more flexible propagation of specific event attributes now: using the new add.fields and add.headers options, any top-level attribute (op, ts_ms) as well as source and transaction attributes can be propagated into the outgoing record and/or as header of the Kafka record:

transforms=unwrap,...
 transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
 transforms.unwrap.add.fields=table,lsn
transforms.unwrap.add.headers=op,source.ts_ms

The existing operation.header and add.source.fields options have been deprecated and will be removed in a future Debezium version.

Further Changes and Bugfixes

Overall 13 issues have been addressed for the Debezium 1.1 Beta2 release.

Most notably, a known Kafka Connect issue which may cause missed change events when the Postgres or Oracle connectors are stopped has been mitigated (DBZ-1766), and the MongoDB SMT for extracting the new document state now gracefully handles identifiers with characters unsupported by Apache Avro, e.g. $ref (DBZ-1767).

A big shout-out to all the contributors of this release: Alex Soto, Daan Roosen, John Psoroulas, Matthias Wessendorf, Melissa Winstanley, as well as Sergei Egorov for his advice on the Testcontainers work.

Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

   



\ No newline at end of file diff --git a/blog/2020/02/19/debezium-camel-integration/index.html b/blog/2020/02/19/debezium-camel-integration/index.html index 0e11bd792d..baa4e1e346 100644 --- a/blog/2020/02/19/debezium-camel-integration/index.html +++ b/blog/2020/02/19/debezium-camel-integration/index.html @@ -103,4 +103,4 @@ }
1 from is the Debezium source endpoint. The URI parts map directly to connector configuration options.
2 The pipeline logic is split depending on the change event type. The recognition is based on the CamelDebeziumIdentifier header, which contains the identifier (<server_name>.<schema_name>.<table_name>) of the source table.
3 The pipeline is now able to process only updates and deletes. The recognition is based on the CamelDebeziumOperation header, which contains the op field of the message Envelope.
4 The Kafka Connect Struct type is converted into a logical type used in the pipeline. The conversion is performed by a custom Camel converter. It is possible to use the out-of-the-box DebeziumTypeConverter that converts a Struct into a Map, but this tightly couples the pipeline logic to the table structure.
5 A supplementary route is invoked that communicates with a message store based on an Infinispan cache to build a message aggregate. The message store checks if it has the question already stored. If not, a new aggregate is created and stored; otherwise, the stored aggregate is updated with the new data.
6 A supplementary route is invoked that formats a mail message and delivers it to the question creator via the SMTP endpoint.
7 The route part related to the answer message type is very similar (answers are added to the question aggregate). The main difference is the posting of a Twitter message when the aggregate contains three answers.

On a side note, for the sake of simplicity, the example currently uses volatile memory to store the Debezium offsets. For persistent storage you could either use a file-based offset store or create a custom offset store implementation based on Infinispan, delegating the storage of offsets to the underlying cache.
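
For readers who have not yet opened the example project, a stripped-down skeleton of such a route is sketched below; the endpoint URI, table identifiers, and target endpoints are placeholders, and the complete working route can be found in the example repository linked at the end of this post.

import org.apache.camel.builder.RouteBuilder;

public class QaRouteSkeleton extends RouteBuilder {

    @Override
    public void configure() {
        // The Debezium source endpoint; connector options (database host, credentials,
        // offset storage, ...) are omitted here and would be passed as URI parameters.
        from("debezium-postgres:qa")
            .choice()
                // Split the pipeline by source table, using the CamelDebeziumIdentifier header.
                .when(header("CamelDebeziumIdentifier").isEqualTo("qa.public.question"))
                    .to("direct:question")
                .when(header("CamelDebeziumIdentifier").isEqualTo("qa.public.answer"))
                    .to("direct:answer")
            .end();
    }
}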

Demo

In order to run the demo, you need to have a Twitter developer account with appropriate API keys and secrets.

Go to the application directory and build all components:

$ mvn clean install

Start the services (provide your own Twitter API credentials):

$ env TWITTER_CONSUMER_KEY=<...> TWITTER_CONSUMER_SECRET=<...> TWITTER_ACCESS_TOKEN=<...> TWITTER_ACCESS_TOKEN_SECRET=<...> docker-compose up

In another terminal create a question and three answers to it:

$ curl -v -X POST -H 'Content-Type: application/json' http://0.0.0.0:8080/question/ -d @src/test/resources/messages/create-question.json
 $ curl -v -X POST -H 'Content-Type: application/json' http://0.0.0.0:8080/question/1/answer -d @src/test/resources/messages/create-answer1.json
 $ curl -v -X POST -H 'Content-Type: application/json' http://0.0.0.0:8080/question/1/answer -d @src/test/resources/messages/create-answer2.json
$ curl -v -X POST -H 'Content-Type: application/json' http://0.0.0.0:8080/question/1/answer -d @src/test/resources/messages/create-answer3.json

The Twitter account should contain a new tweet with a text like "Question 'How many legs does a dog have?' has many answers (generated at 2020-02-17T08:02:33.744Z)". Also the MailHog server UI should display messages like these:

Figure 4. The MailHog Messages

Conclusion

Apache Camel is a very interesting option for implementing system integration scenarios.

Without the need for any external messaging infrastructure, it is very easy to deploy a standalone Camel route with the Debezium component, enabling the capture of data changes and the execution of complex routing and transformation operations on them. Camel equips the developer with a full arsenal of enterprise integration pattern implementations, as well as more than a hundred connectors for different systems that can be included in a complex service orchestration.

The source code of the full example is available on GitHub.

Jiri Pechanec

Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

   



\ No newline at end of file diff --git a/blog/2020/02/25/lessons-learned-running-debezium-with-postgresql-on-rds/index.html b/blog/2020/02/25/lessons-learned-running-debezium-with-postgresql-on-rds/index.html index 52ac1bcfdd..cc639661ab 100644 --- a/blog/2020/02/25/lessons-learned-running-debezium-with-postgresql-on-rds/index.html +++ b/blog/2020/02/25/lessons-learned-running-debezium-with-postgresql-on-rds/index.html @@ -6,4 +6,4 @@ LogSequenceNumber a = LogSequenceNumber.valueOf(1516427642656L); System.out.println(a.asString()); } -}
  • Peek changes from the WAL up to the LSN above using SELECT pg_logical_slot_peek_changes('<your-slot-name>', '<lsn-from-above>', 1). This is the replication change that we are going to skip, so please make sure that this is the record that you want to skip. Once confirmed, proceed to the next step.

  • Advance the replication slot by skipping 1 change using SELECT pg_logical_slot_get_changes('<your-slot-name>', NULL, 1). This will consume 1 change from the replication slot.

  • Publish a message to Debezium’s offset topic with the next LSN and TxId (a sketch follows this list). We were able to successfully get it working by adding 1 to both the lsn and the txId.

  • Deploy Debezium again and it should have skipped the record.
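
A rough sketch of publishing that offset message with a plain Kafka producer is shown below; the topic name, key, and value are purely illustrative placeholders, and in practice you should copy the exact key and value format from an existing record on your connector's offsets topic before producing the adjusted one.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class OffsetTopicPublisher {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        // The key must match exactly what Kafka Connect already stores for this connector;
        // copy it from an existing record on the offsets topic. Key, value, and topic name
        // below are illustrative only, with lsn and txId bumped by 1 as described above.
        String key = "[\"my-connector\",{\"server\":\"my-server\"}]";
        String value = "{\"lsn\":1516427642657,\"txId\":1234568}";

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("my_connect_offsets", key, value));
            producer.flush();
        }
    }
}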

    Conclusion

    Why Debezium?

    In closing we would like to highlight the issues Debezium has solved for us.

    One of the biggest concerns when handling any data is data consistency. Debezium helps us avoid dual writes and maintains data consistency between our RDBMS and Kafka, which makes it easier to ensure data consistency in all further layers.

    Debezium enables low-overhead change data capture, and we have now ended up defaulting to enabling Debezium for all new data sources being created.

    Debezium’s support for a wide variety of data sources, PostgreSQL, MySQL and MongoDB specifically, helps us provide a standard technology and platform to perform data integration on. No more having to write custom code to connect each data source.

    Debezium being open source proved to be immensely useful in the early days to make sure we were able to send in patches for a few bugs ourselves without having to ask someone to prioritise the issue. And since it’s open source there is a growing community around it which can help you figure out your issues and provide general guidance. Check out this page on the Debezium website for a lot of awesome community contributed content.

    Challenges

    Having said the above, Debezium is still quite a young project and has a few areas in which improvement will be welcome (and your contributions too, in the form of code, design, ideas, documentation and even blog posts):

    • Zero-downtime high availability. Debezium relies on the Kafka Connect framework to provide high availability but it does not provide something similar to a hot standby instance. It takes time for an existing connector to shut down and a new instance to come up - which might be acceptable for a few use-cases but unacceptable in others. See this blog post by BlaBlaCar for a discussion and their solution around it.

    • Support for other data sinks besides Kafka. In a few scenarios you might want to move the events from your database directly to an API, a different data store, or maybe a different message broker. But since Debezium is currently written on top of Kafka Connect, it can only write the data into Kafka. Debezium does provide an embedded engine which you can use as a library to consume change events in your Java applications; see the documentation around embedding Debezium. In case you do end up writing a different adapter around Debezium to move data into a different destination, consider making it open source so that you benefit from additional maintainers and the community benefits from getting new use cases solved.

    • Common framework to write any new CDC implementation. We particularly have a use case of performing CDC on top of AWS DynamoDB. Instead of writing a custom Kafka Connector from scratch, we can reuse the Debezium core framework and write only the DynamoDB specific parts. This will help prevent bugs since a lot of the existing flows and edge cases might have already been handled. There is ongoing work around this theme to refactor all existing Debezium connectors to use the common framework to make it easier to write new custom connectors. For an example of how to implement one, take a look at the Debezium incubator repository.

    • A few minor annoyances which are already tracked on the project’s issue tracker - specifically DBZ-1760 (skipping unparseable records), DBZ-1263 (update table whitelist for existing connector), DBZ-1723 (Reconnect to DB on failure), DBZ-823 (Parallel snapshots).

    Future Scope

    We do have a few tasks planned for the future to improve our existing workflow regarding Debezium and Kafka Connect.

    • Upgrading to Debezium v1.0. Debezium recently released the first 1.0 release with a number of new features including support for the CloudEvents format which we are looking towards to provide a unified message format for all data across the organisation.

    • Trying out the Outbox design pattern as documented at Reliable Microservices Data Exchange With the Outbox Pattern to unify application events and data change events. The outbox pattern also provides transactional guarantees across service boundaries in a microservices system - something everybody wants in an event based microservices architecture.

    • Setting up an Apache Atlas integration to automate the creation of data sources and tracking data lineage in Atlas to help with data governance and discoverability.

    • Writing and open sourcing an AWS DynamoDB CDC connector as a Debezium connector. Since we are using AWS DynamoDB too we need to provide the same capabilities that the other data sources are using in terms of CDC. For that we are writing a DynamoDB CDC connector using Debezium as a framework. The work is still in its early stages and is planned to be released as an open source connector.

    So overall, we started the post by sharing our business use cases and discussing how Debezium has helped us solve them. We then detailed how we have been running Debezium in production for performing CDC on PostgreSQL on AWS RDS and talked about the mistakes we made when starting out and how to solve them. And as is common in software engineering, we did face production incidents along the way and are sharing our learnings from that incident in the hope that they might be useful for the wider community.

    Also a lot of thanks to the people who reviewed this post including Gunnar Morling, Kapil Bharati and Akash Deep Verma.

    Further Reading

    Ashhar Hasan

    Ashhar is a software engineer at Delhivery where he focuses on change data capture, data warehousing, and creating event-driven systems.

       



    \ No newline at end of file diff --git a/blog/2020/03/05/db2-cdc-approaches/index.html b/blog/2020/03/05/db2-cdc-approaches/index.html index 48d2dc4c76..3fa7f5d70b 100644 --- a/blog/2020/03/05/db2-cdc-approaches/index.html +++ b/blog/2020/03/05/db2-cdc-approaches/index.html @@ -1 +1 @@ - Approaches to Running Change Data Capture for Db2

    We have developed a Debezium connector for use with Db2, which is now available as part of the Debezium incubator. Here we describe our use case for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecosystem, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

    Background: Bringing Data to a Datalake

    In 2016 IBM started an effort to build a single platform on which IBM’s Enterprise Data could be ingested, managed and processed: the Cognitive Enterprise Data Platform (CEDP). IBM Research was one of the major contributors to this project. One of the fundamental activities was bringing data from geographically distributed data centers to the platform. The ingestion into the Datalake used a wide variety of technologies.

    Figure 1. CEDP Logical Architecture

    A significant fraction of this enterprise data is gathered in relational databases present in existing data-warehouses and datamarts. These are generally production systems whose primary usage is as "systems of record" for marketing, sales, human resources etc. As these are systems run by IBM for IBM unsurprisingly they are mainly some variant of IBM’s Db2.

    Getting Data From Db2 Efficiently

    Data is ingested into an immutable Landing Zone within the Datalake. This Landing Zone is implemented as a HDFS instance. Streaming data, e.g. news, is moved from the source using Kafka and then written into HDFS using the appropriate connector.

    One of our key design objectives is automation. There are over 5,000 relational database tables from over 200 different sources ingested every day. In order to scale the data processing platform - aside from the governance processes that allow data owners to bring data to the platform - the ingestion itself must be self-service.

    Initially, relational data was always bulk loaded from the source using Sqoop. A REST interface is made available so that the data owners can configure when the data should be moved, e.g. periodically, triggered by an event, etc. A Sqoop ingestion is a distributed set of tasks, each of which uses a JDBC connection to read part of a relational database table, generate a file-based representation of the data, e.g. Parquet, and then store it on HDFS. With Sqoop we can completely refresh the data or append to it; however, we cannot modify the data incrementally.

    From a practical point of view this limits the periodicity with which data can be updated. Some of the larger tables represent tens of GBytes of compressed Parquet. While Sqoop allows many tasks to be run in parallel for the same table the bottleneck is typically the network across the WAN and/or rate controlling at the source database system itself. Often only a small fraction of the table is modified on any particular day, meaning that a huge amount of data is sent unnecessarily.

    To address these issues we introduced the use of Change Data Capture (CDC) for the movement of data across the WAN. Ingestion in CDC mode into a storage system designed for files that are never modified is problematic. While some recent work, such as Delta Lake or Hive 3.0, has started introducing delta changes into the Hadoop ecosystem, these were not mature enough for our needs.

    As an alternative we use the concept of a Relational Database Drop Zone in which data owners can instantiate shadows of their database and from which we then ingest into HDFS. As the Drop Zone and Landing Zone are in the same data center and the ingesting of data is a highly parallelizable task, the actual ingestion of large tables was typically orders of magnitude faster than the transferring of the data from the source.

    Data owners could move data using whatever tool they preferred into their Drop Zone. In particular they could transfer changes to data obtained through CDC.

    CDC systems are almost as old as relational databases themselves. Typically they were designed for back-up or failure recovery purposes and for use by a database administrator.

    Db2 has a long pedigree: it is over 40 years old and runs on a wide set of operating systems including z/OS, AIX, Linux and Windows. It has evolved a large set of distinct tools for CDC, intended for use in different contexts. We started exploring the use of IBM’s SQL Replication. Once tables are put into CDC mode by the admin, a capture agent is started that reads changes made to those tables from the transaction log. The changes are stored in dedicated CDC tables. At the remote database an apply agent periodically reads the changes from these CDC tables and updates the shadow tables.

    While conceptually this is quite simple in practice it is difficult to automate for the following reasons:

    • Source and sink are tightly coupled and therefore the same table cannot easily be replicated to multiple different target database systems.

    • If the source system is already using replication on a table, e.g. for back-up purposes, we cannot use this method to replicate to the Datalake.

    • Elevated privileges are required on the source. Data owners give read access to their system for Sqoop, but granting administrator privileges poses compliance problems.

    • Elevated privileges are required on the sink. For simplicity our Drop Zone is a single Db2 system with database instances for each of the data sources. Allowing the data owners to set up SQL Replication to the Drop Zone would allow them access to each other’s instances, which is a compliance violation.

    • The tools are designed for system admins and as a result there are a large number of gotchas for the unwary. For example, care must be taken in selecting a wide range of parameters such as: the mode that the transaction log has to be in to allow CDC, the time the last backup was taken, whether the database is row or column oriented etc.

    • It is a Db2-specific solution; although the majority of the relational data sources were Db2, we also had Netezza, MySQL and SQL Server sources.

    We found in practice that the combination of the above meant that it was impractical to allow data owners to use IBM SQL Replication as a CDC mechanism for the Datalake.

    IBM offers another set of tools for data replication called IBM InfoSphere Data Replication (IIDR). This is sold as a product distinct from Db2. IIDR is not a Db2-specific solution; it works for a wide range of relational databases as well as non-relational data storage systems, e.g. file systems. In essence IIDR has source agents and sink agents, each running at or close to its respective system. Source agents read the changes and propagate them to the sink agent via a wide range of protocols including TCP sockets, MQ, shared files, etc. The source and sink agents are configured via an entity called the Access Server, through which sources are connected to sinks and the tables to capture are specified. The Access Server is itself typically controlled via a graphical user interface by a system administrator.

    Thus for example we can have a Db2 source agent and an IIDR Kafka sink agent that behaves like a standard Apache Kafka Connect source connector, i.e. it writes change events into a Kafka topic. The initial records are Upsert messages (REFRESH phase) and subsequent changes are propagated as a sequence of Upsert/Delete Messages (MIRROR phase).

    IIDR makes the system more loosely coupled and less Db2 specific. However, it is still not simple to automate. In essence we need to allow a data owner to specify the source database system and the tables to replicate via a REST call, and then automatically configure and deploy the necessary agents and Access Server on our Kubernetes cluster. As we cannot run on the source system itself, we cataloged the remote Db2 system so that it looks like a local one and ran the agent against that.

    IIDR assumes the agent runs on the same hardware architecture as the relational database system. The IIDR agent uses a low-level Db2 API to read the transaction log. Many of our source systems run on AIX/PowerPC while the Kubernetes platform on which the agents are deployed runs on Linux/Intel. This leads to endianness compatibility problems.

    There are two limitations to this approach:

    • IIDR is designed to be monitored and managed by a system administrator. Trying to capture the actions and responses of an administrator via scripts that parse the IIDR logs and attempt to react to failures can only be brittle. As long as nothing misbehaves the system runs fine, but if something fails (network interruption, Kubernetes proxy failure, LDAP being down, etc.) it is almost impossible to automate the appropriate response.

    • While touching the source system as little as possible was an admirable objective, from a practical point of view it is almost impossible on a production system to run the CDC system independently of the source system. If a system admin reloads an older version of a table from back-up, or radically changes the DDL of that table, the CDC system must be aware that this has occurred and take the appropriate action. In the case of a DDL change, a new version of the table is created and consequently a new version of the KTable in turn must be created.

    We saw these and many more problems when trying to use the above approach for CDC against real production systems. We concluded that the administration of the CDC system and the source system cannot be done independently, and that to a large extent our problems came from trying to use IIDR for a use case for which it was not intended.

    Approaches to Implementing a Debezium Db2 Connector

    When Debezium became available we started evaluating it for our purposes. As it works with a wide range of relational database systems and is open source, we could imagine that database administrators would allow it to be used to generate a representation of their data for downstream applications. Essentially, the Debezium system would become an extension of the database source system. Debezium is not required to produce an identical copy of the database tables (unlike IIDR or SQL Replication). Typically the downstream applications are used for auxiliary tasks, e.g. analytics, not for fail-over, meaning problems such as preserving precise types are less pressing. For example, if a time-stamp field is represented as a string in Elasticsearch it is not the end of the world.

    The only concern we had with Debezium was that it didn’t have a connector for Db2.

    Two approaches presented themselves:

    • Use the low-level Db2 API to read the transaction log directly, as IIDR does.

    • Read the changes from the SQL Replication CDC capture tables using SQL.

    An investigation of the code concluded that the model used by the already existing connector for Microsoft SQL Server could be largely reused for Db2. In essence:

    • The SQL queries to poll the changes are different

    • The structure and nature of the Logical Sequence Number (LSN) are different

    • Db2 distinguishes between a database system and a database, while SQL Server does not; this needs to be accounted for.

    Otherwise everything else could be reused. Thus we adapted the existing SQL Server code base to implement the Db2 connector.
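
    For illustration, registering the resulting connector with Kafka Connect takes a configuration along the following lines. The host, credentials, table filter and topic names here are placeholders, and the exact option names should be checked against the Db2 connector documentation for the Debezium version in use:

    connector.class=io.debezium.connector.db2.Db2Connector
    database.hostname=db2server
    database.port=50000
    database.user=db2inst1
    database.password=secret
    database.dbname=TESTDB
    database.server.name=mydb2server
    table.whitelist=MYSCHEMA.CUSTOMERS
    database.history.kafka.bootstrap.servers=kafka:9092
    database.history.kafka.topic=schema-changes.mydb2server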

    Future Work/Extensions

    Benchmarking

    The connectors for Db2 and SQL Server use a polling model, i.e. the connectors periodically query the CDC table to determine what has changed since the last time they polled. A natural question is what the "optimal" polling frequency is, given that polling itself has a cost, i.e. what are the trade-offs between latency and load?
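
    To make that trade-off concrete, the following is a minimal sketch of the polling pattern; the change table, its columns and the sequence handling are simplified, hypothetical stand-ins rather than the connector’s actual implementation:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    // Minimal sketch of the polling pattern described above. The change table,
    // its columns and the sequence handling are hypothetical simplifications.
    public class ChangeTablePoller {

        public static void main(String[] args) throws Exception {
            long pollIntervalMs = 500;   // knob for the latency vs. load trade-off discussed above
            long lastSequence = 0L;      // position of the last change already processed

            try (Connection conn = DriverManager.getConnection(
                    "jdbc:db2://db2server:50000/TESTDB", "db2inst1", "secret")) {

                while (true) {
                    try (PreparedStatement stmt = conn.prepareStatement(
                            "SELECT COMMIT_SEQ, OPERATION, DATA FROM CDC.CUSTOMERS_CT "
                          + "WHERE COMMIT_SEQ > ? ORDER BY COMMIT_SEQ")) {
                        stmt.setLong(1, lastSequence);
                        try (ResultSet rs = stmt.executeQuery()) {
                            while (rs.next()) {
                                lastSequence = rs.getLong("COMMIT_SEQ");
                                // Here the connector would turn the row into a change event
                                // and emit it to Kafka.
                            }
                        }
                    }
                    Thread.sleep(pollIntervalMs);   // shorter interval: lower latency, higher load
                }
            }
        }
    }

    Lowering pollIntervalMs reduces end-to-end latency but increases the number of queries hitting the source system, which is exactly the trade-off a benchmark would quantify.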

    We are interested in building a general purpose framework for benchmarking systems in order to get a better understanding of where the trade-offs are in terms of latency, throughput of the CDC system and load on the source system.

    Db2 Notification System

    Rather than building a polling connector for Db2, it would also be possible to create a notification system. We considered this, but decided the polling connector was simpler for a first implementation.

    One way to build a notification connector for Db2 would be to:

    • Identify change events by using OS file system watchers (Linux or Windows). These can monitor the transaction log directory of the Db2 database and send events when files are modified or created.

    • Determine the exact nature of the event by reading the actual table changes with the db2ReadLog API. In principle this API can be invoked remotely as a service.

    • Determine the related Db2 data structures via a SQL connection, e.g. the table DDL.

    The Debezium event-driven Db2 connector would wait on notifications and then read the actual changes via db2ReadLog and SQL. This would require the watcher agent to run locally on the database system, similarly to the capture server.
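
    As a rough sketch of the watcher part only (the log directory path is a placeholder, and reading the actual changes via db2ReadLog is omitted):

    import java.nio.file.FileSystems;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardWatchEventKinds;
    import java.nio.file.WatchEvent;
    import java.nio.file.WatchKey;
    import java.nio.file.WatchService;

    // Sketch of the file-system watcher part only; the directory is a placeholder.
    public class TransactionLogWatcher {

        public static void main(String[] args) throws Exception {
            Path logDir = Paths.get("/db2/data/LOGSTREAM0000");   // hypothetical transaction log directory

            try (WatchService watcher = FileSystems.getDefault().newWatchService()) {
                logDir.register(watcher,
                        StandardWatchEventKinds.ENTRY_CREATE,
                        StandardWatchEventKinds.ENTRY_MODIFY);

                while (true) {
                    WatchKey key = watcher.take();   // blocks until the OS reports file activity
                    for (WatchEvent<?> event : key.pollEvents()) {
                        // A log file was created or modified: this is the notification that would
                        // trigger reading the new changes via db2ReadLog and SQL.
                        System.out.println(event.kind() + ": " + event.context());
                    }
                    key.reset();
                }
            }
        }
    }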

    DML v DDL Changes

    Change Data Capture (CDC) systems propagate modifications made to the source tables via Data Manipulation Language (DML) operations such as INSERT, DELETE etc. They do not explicitly handle changes to the source table made via Data Definition Language (DDL) operations such as TRUNCATE, ALTER etc. It is not really clear what the behavior of Debezium should be when a DDL change occurs. We are exploring what the Debezium model should be for changes of this sort.

    Conclusion

    While it is attractive to assume new enterprise data systems are built completely from scratch, it will almost certainly be necessary to interact with existing relational database systems for some considerable time. Debezium is a promising framework for connecting existing enterprise data systems into data processing platforms such as Datalakes. Our current work at IBM Research focuses on building hybrid-cloud data-orchestration systems, with Kafka and Debezium as central components.

    Luis Garcés-Erice

    Luis is a Research Staff Member specializing in Distributed Systems at IBM Research's Zurich Laboratory.

     

    Sean Rooney

    Sean Rooney is a Research Staff Member at IBM Research Zurich where he works on Hybrid Cloud Systems.

     

    Peter Urbanetz

    Peter is a Software Engineer at IBM Research in Zurich.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    diff --git a/blog/2020/03/13/debezium-1-1-c1-released/index.html b/blog/2020/03/13/debezium-1-1-c1-released/index.html
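
    The excerpt from this post picks up mid-way through, just after its custom converter example. Purely as an illustration, a converter of the kind configured below might look roughly like the following sketch; the class name is taken from the configuration shown underneath, while the SPI type and method names reflect our understanding of the Debezium converter SPI and should be verified against the documentation:

    package com.example;

    import java.util.Properties;

    import org.apache.kafka.connect.data.SchemaBuilder;

    import io.debezium.spi.converter.CustomConverter;
    import io.debezium.spi.converter.RelationalColumn;

    // Illustrative sketch only, not the original example from this post.
    public class IsbnConverter implements CustomConverter<SchemaBuilder, RelationalColumn> {

        @Override
        public void configure(Properties props) {
            // Custom options from the connector configuration (e.g. isbn.* entries) arrive here.
        }

        @Override
        public void converterFor(RelationalColumn column, ConverterRegistration<SchemaBuilder> registration) {
            // Register a conversion for all columns named "isbn": emit them as plain strings.
            if ("isbn".equalsIgnoreCase(column.name())) {
                registration.register(SchemaBuilder.string(), value -> value == null ? null : value.toString());
            }
        }
    }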

    In order to use such a custom converter, compile it and add the JAR to the plug-in directory of the connector. Then configure it like so:

    converters=isbn
     isbn.type=com.example.IsbnConverter
     # custom options as needed
    isbn.foo=bar

    Other Features and Changes

    The MongoDB connector has been migrated to the common CDC connector framework which we started to develop for the SQL Server and Oracle connectors. This is going to significantly simplify the maintenance of the code base for us, as many cross-cutting features can be implemented in one central place. As an example, the MongoDB connector now supports the metrics you already can use with the relational connectors (DBZ-845), allowing you to monitor that connector in production.

    The Postgres connector now supports automatic reconnects in case the database connection has been lost (DBZ-1723). Again this has been largely implemented as a generic facility; the Postgres connector is simply the first one to make use of it. We’ve seen increased reports of Postgres disconnects in specific environments, which is why we decided this should be the first connector to support reconnects.

    When streaming change events to other relational databases, the column.propagate.source.type connector option comes in handy for propagating the exact column definition as a schema header. So far, this had to be configured for each individual column; as of this release, the option can be set globally for column types, drastically reducing the need for configuration when working with many columns of a specific type whose schema information should be exported (DBZ-1830).

    Another improvement for MongoDB users is the new support for exporting information about the sharding key in update and delete events (DBZ-1781).

    As always, a good number of bugs were fixed, too. Overall, 44 issues were addressed for Debezium 1.1.0.CR1. Please refer to the release notes for more details.

    An open-source project like Debezium would be nothing without its community of contributors. The following people have contributed to this release: Alan Zhangzf, Fabio Cantarini, Hossein Torabi, JanHendrikDolling, John Graf, Raúl Tovar and Ruslan Gibaiev. Thank you to each and every one of you!

    With the CR1 release being done, it won’t be much longer until Debezium 1.1 Final. Depending on issues found with this release candidate, we might do a CR2 release, followed by the Final shortly thereafter. For plans for future versions please refer to the roadmap and let us know about your requirements. Our general plan is to adopt a cadence of quarterly minor releases, i.e. you can expect Debezium 1.2 in about three months from now, 1.3 in Q3 and so on.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



    diff --git a/blog/2020/03/19/integration-testing-for-change-data-capture-with-testcontainers/index.html b/blog/2020/03/19/integration-testing-for-change-data-capture-with-testcontainers/index.html

    assertThat(JsonPath.<String> read(changeEvent.value(), "$.after.title"))
            .isEqualTo("Learn Debezium");

    consumer.unsubscribe();
    1. Create a table in the Postgres database and insert two records
    2. Register an instance of the Debezium Postgres connector
    3. Read two records from the change event topic in Kafka and assert their attributes

    Note how Debezium’s Testcontainers support allows you to seed the connector configuration from the database container, avoiding the need to give the database connection properties explicitly. Only the unique database.server.name must be given, and of course you could apply other configuration options such as table or column filters, SMTs and more.
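
    As a sketch of that seeding (assuming the Debezium Testcontainers integration module described in this post; image tags and names are examples):

    import org.testcontainers.containers.KafkaContainer;
    import org.testcontainers.containers.Network;
    import org.testcontainers.containers.PostgreSQLContainer;

    import io.debezium.testing.testcontainers.ConnectorConfiguration;
    import io.debezium.testing.testcontainers.DebeziumContainer;

    // Sketch of the configuration seeding described above.
    public class CdcIntegrationTestSetup {

        private static final Network NETWORK = Network.newNetwork();

        private static final KafkaContainer KAFKA =
                new KafkaContainer().withNetwork(NETWORK);

        private static final PostgreSQLContainer<?> POSTGRES =
                new PostgreSQLContainer<>("debezium/postgres:11")
                        .withNetwork(NETWORK)
                        .withNetworkAliases("postgres");

        private static final DebeziumContainer DEBEZIUM =
                new DebeziumContainer("debezium/connect:1.1.0.Final")
                        .withNetwork(NETWORK)
                        .withKafka(KAFKA);

        public static void registerConnector() {
            // All JDBC connection properties are derived from the database container;
            // only the logical server name is supplied explicitly.
            ConnectorConfiguration config = ConnectorConfiguration
                    .forJdbcContainer(POSTGRES)
                    .with("database.server.name", "dbserver1");

            DEBEZIUM.registerConnector("my-connector", config);
        }
    }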

    The source code for the drain() method for reading a given number of records from a Kafka topic is omitted for the sake of brevity. You can find it in the full example on GitHub.
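
    For readers who do not want to jump to the repository right away, a minimal stand-in (not the original implementation) is simply a loop that polls the consumer until the expected number of records has arrived:

    import java.time.Duration;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    // Minimal stand-in for the drain() helper, not the original from the examples repository.
    public class KafkaTestHelpers {

        public static List<ConsumerRecord<String, String>> drain(
                KafkaConsumer<String, String> consumer, int expectedRecordCount) {

            List<ConsumerRecord<String, String>> records = new ArrayList<>();
            while (records.size() < expectedRecordCount) {
                // Keep polling until the expected number of change events has arrived.
                consumer.poll(Duration.ofMillis(200)).forEach(records::add);
            }
            return records;
        }
    }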

    JsonPath-based assertions come in handy for asserting the attributes of the expected data change events, but of course you could also use any other JSON API for the job. When using Apache Avro instead of JSON as a serialization format, you’d have to use the Avro APIs instead.

    Wrap-Up

    Testcontainers and Debezium’s support for it make it fairly easy to write automated integration tests for your CDC set-up.

    The testing approach discussed in this post could be expanded in multiple ways. E.g. it might be desirable to put your connector configuration under revision control (so you can manage and track any configuration changes) and drive the test using these configuration files. You also might take things one step further and test your entire data streaming pipeline. To do so, you’d have to deploy not only the Debezium connector(s), but also a sink connector, e.g. for your data warehouse or search server. You could then run assertions against the data in those sink systems, ensuring the correctness of your data pipeline end-to-end.

    What’s your take on testing CDC set-ups and pipelines? Let us know in the comments below!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



    diff --git a/blog/2020/03/24/debezium-1-1-final-released/index.html b/blog/2020/03/24/debezium-1-1-final-released/index.html

    Debezium 1.1.0.Final Released

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    Besides these key features, there are many other smaller improvements such as reconnects for the Postgres connector, more flexibility when extracting the after state from change events, and more powerful options for propagating metadata on the source types of captured columns.

    Since the CR1 release, 22 more issues were resolved: mostly bug fixes, documentation improvements, and changes related to the stabilization of some flaky tests that would fail intermittently on our CI server.

    Besides these, there’s one very useful improvement for the Postgres connector: as part of its heartbeat functionality, it can now regularly execute DML operations in the source database (DBZ-1815). This helps in situations where multiple databases on the same database host receive writes at different frequencies, and where Debezium otherwise couldn’t acknowledge processed WAL offsets with the source database.
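
    For example, such a heartbeat action can be configured roughly as follows; the table and statement are placeholders, and the option names should be double-checked against the Postgres connector documentation:

    heartbeat.interval.ms=10000
    heartbeat.action.query=INSERT INTO debezium_heartbeat (ts) VALUES (now())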

    Overall, 123 issues have been resolved for Debezium 1.1 and its preview releases. Please refer to the original announcements (Alpha1, Beta1, Beta2, CR1) to learn more about the details.

    The most important part of any open-source project is its community. The following people have contributed to Debezium 1.1, bumping the total number of community members contributing to Debezium to more than 175:

    A big, big "thank you" to each and every one of you!

    Having shipped Debezium 1.1, the team is focusing on 1.2 now. Adhering to a quarterly release cadence, Debezium 1.2 is scheduled for end of June. You can expect preview releases roughly every three weeks.

    What’s coming up in 1.2? Check out the roadmap to learn more; one key feature will be a "standalone mode" for running Debezium independently of Kafka Connect, allowing change events to be fed to other messaging infrastructure such as Amazon Kinesis. Also make sure to let us know about your suggestions, requirements and feature requests!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



    diff --git a/blog/2020/03/31/debezium-newsletter-01-2020/index.html b/blog/2020/03/31/debezium-newsletter-01-2020/index.html

    Debezium's Newsletter 01/2020

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    Upcoming Events

    Due to the corona virus situation, many conferences the Debezium team had planned to attend, have been postponed or even cancelled. E.g. JavaDay Istanbul has been moved to September, and QCon Sao Paulo to December. We hope the situation will have improved by then and look forward to meeting again with the Debezium community in person eventually.

    Until then, there’s a few virtual events you can enjoy; there’ll be a Debezium session at the Red Hat Summit 2020 - Virtual Experience. We’re also planning to do another episode on Debezium at DevNation Live. If you’d like to have a session on Debezium at your virtual meetup or conference, please get in touch!

    Articles

    There have been a number of blog posts about Debezium lately; here are some of the latest ones that you should not miss:

    Please also check out our compiled list of resources around Debezium for even more related posts, articles and presentations.

    Examples

    An example is an excellent way to get a better understanding of how or why something behaves as it does. Debezium’s examples repository has undergone several changes recently we’d like to highlight:

    We also discovered a very helpful tool for visualizing the contents of Docker Compose files. So we’ve begun to add diagrams like this one for the kstreams-live-update demo to the examples, helping to familiarize with the examples more easily:

    KStreams Live Update Example Topology

    Time to Upgrade

    Debezium version 1.1.0.Final was released last week. If you are using an older version, we urge you to check out the latest major release. For details on the bug fixes, enhancements, and improvements that spanned 5 releases, check out the release-notes.

    The Debezium team has also begun active development on the next major version, 1.2. The major focus in 1.2 is implementing a standalone container to run Debezium without Apache Kafka and Connect, enabling users to send change events to Kinesis and other platforms more easily.

    Keep an eye on our releases page to get a jump start on what bug fixes, enhancements, and changes will be coming in 1.2 as they become available.

    Using Debezium?

    Our community users page includes a variety of organizations that are currently using Debezium. If you are a user of Debezium and would like to be included, please send us a GitHub pull request or reach out to us directly through our community channels found here.

    And if you haven’t yet done so, please consider adding a ⭐ for the GitHub repo; keep them coming, we’re almost at 3,000 stars!

    Getting Involved

    It can often be overwhelming when starting to work on an existing code base. We welcome community contributions and we want to make the process of getting started extremely easy. Below is a list of open issues that are currently labeled with easy-starter if you want to dive in quick.

    • Configure Avro serialization automatically when detecting link to schema registry (DBZ-59)

    • Support CREATE TABLE …​ LIKE syntax for blacklisted source table (DBZ-1496)

    • Explore SMT for Externalizing large column values (DBZ-1541)

    • Update the tutorial to use the Debezium tooling container image (DBZ-1572)

    • Debezium for SQL Server does not support reconnecting after the connection is broken (DBZ-1882)

    Feedback

    We intend to publish new additions to this newsletter periodically. Should anyone have any suggestions on changes or what could be highlighted here, we welcome that feedback. You can reach out to us via any of our community channels found here.

    And most importantly, stay safe and healthy wherever you are!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve ours existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

Using Debezium with Apicurio API Schema Registry

    Conclusion

In this article we discussed multiple approaches to message/schema association. The Apicurio registry was presented as a solution for schema storage and versioning, and we demonstrated how Apicurio can be integrated with Debezium connectors to efficiently deliver messages along with their schema to consumers.
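
As a rough sketch of what that integration looks like on the connector side, the converter section of a connector configuration could be set up along the following lines. The registry URL is a placeholder, the AvroConverter class name is my assumption for the Apicurio converter artifact of that era, and the global-id strategy is the one shown in this post’s configuration excerpt:

# use the Apicurio Avro converters for keys and values (class name to be verified against your Apicurio version)
key.converter=io.apicurio.registry.utils.converter.AvroConverter
key.converter.apicurio.registry.url=http://apicurio:8080/api
key.converter.apicurio.registry.global-id=io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy
value.converter=io.apicurio.registry.utils.converter.AvroConverter
value.converter.apicurio.registry.url=http://apicurio:8080/api
value.converter.apicurio.registry.global-id=io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy

With converters like these in place, schemas are registered and versioned in Apicurio while the change events themselves stay compact.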

    You can find a complete example for using the Debezium connectors together with the Apicurio registry in the tutorial project of the Debezium examples repository on GitHub.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       



Debezium 1.2.0.Alpha1 Released

transforms.filter.type=io.debezium.transforms.Filter
transforms.filter.language=jsr223.groovy
transforms.filter.condition=value.after.customerType != 42
...

    value is the change event’s value; you could also refer to the event’s key and even the corresponding schema objects. Groovy automatically resolves property paths such as value.after.customerType to look-ups in map-like data structures such as Kafka Connect’s Struct type. This allows for very concise filtering conditions.
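
As one more illustrative sketch, a condition can combine the value and the key; the numeric id key field below is purely an assumption about the captured table:

# sketch only: keep update events whose (assumed) numeric key field id is even
transforms=filter
transforms.filter.type=io.debezium.transforms.Filter
transforms.filter.language=jsr223.groovy
transforms.filter.condition=value != null && value.op == 'u' && key.id % 2 == 0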

Note that this SMT is in incubating state for now, i.e. details of its API and configuration surface may still change. Please give it a try and share your experiences.

    Other Features

Besides these key features, there are a number of other new features coming with the 1.2.0.Alpha1 release:

    • New metrics NumberOfDisconnects and NumberOfPrimaryElections for the MongoDB connector (DBZ-1859)

    • Support for automatic reconnects after connection losses in the SQL Server connector (DBZ-1882)

• New column masking mode "consistent hashing" (DBZ-1692): Debezium allows masking specific column values, e.g. to satisfy concerns around data privacy and protection. Using the new "consistent hashing" mode it’s now possible to not only use asterisks as masking characters, but also to hash values based on the masked data contents (see the configuration sketch after this list). Quoting the original issue reporter, this "will be useful for [anonymizing] data but in this case it still needs to be relatable between topics. It’s a typical requirement for warehouses where you want to anonymize sensitive data but still need to keep referential integrity of your data"

• Linking of update change events in case of primary key updates (DBZ-1531): most relational Debezium connectors represent an update to the primary key of a record by a delete event using the old key and a subsequent insert event using the updated key; using the new record headers __debezium.newkey and __debezium.oldkey, it is now possible for consumers to link these change events together when working with change data from the MySQL and Postgres connectors

    • Upgrade of Debezium’s container images to Apache Kafka 2.4.1 (DBZ-1925)
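
A minimal sketch of the two masking flavours, referenced from the consistent-hashing item above; the salt and the fully-qualified column names are placeholders, so check the exact property pattern against the connector documentation for your version:

# classic masking: replace the column value with a fixed number of asterisks
column.mask.with.12.chars=inventory.customers.credit_card
# consistent hashing: replace the value with a salted hash, so equal inputs stay correlatable across topics
column.mask.hash.SHA-256.with.salt.CzQMA0cB5K=inventory.customers.email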

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • High CPU usage when the Postgres connector is idle (DBZ-1960)

• An empty wal2json change event could cause an NPE (DBZ-1922)

    • Cassandra Connector: unable to deserialize column mutation with reversed type (DBZ-1967)

    • Outbox Quarkus Extension throws NPE in quarkus:dev mode (DBZ-1966)

    • Validation of binlog_row_image is not compatible with MySQL 5.5 (DBZ-1950)

    Please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions. We’ve also backported the critical bugfixes to the 1.1 branch and will release Debezium 1.1.1 tomorrow.

    A big thank you to all the contributors from the community who worked on this release: Alexander Iskuskov, Alexander Schwartz, Bingqin Zhou, Fatih Güçlü Akkaya, Grant Cooksey, Jan-Hendrik Dolling, Luis Garcés-Erice, Nayana Hettiarachchi and René Kerner!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



Debezium 1.2.0.Beta1 Released

transforms.route.type=io.debezium.transforms.Filter
transforms.route.language=jsr223.graal.js
transforms.route.topic.expression=value.after.ordertype == 'B2C' ? 'b2c_orders' : 'b2b_orders'
...

    Further Changes and Bugfixes

    Some other features and fixes of this release include:

    • Support for Postgres JSON, JSONB, TIME and TIMESTAMP array columns (DBZ-1969, DBZ-1990)

• Column whitelists for the Postgres connector, which come in handy if you’re interested in capturing only a small subset of table columns (DBZ-1962); see the configuration sketch after this list

    • MySQL’s FLUSH TABLE statement is handled correctly (DBZ-2047)

    • Unique namespaces are used in routed outbox events (DBZ-1963)

    • Fixed a potential value overflow in Postgres BIT VARYING columns (DBZ-1949)

    • Support for the eventType field has been removed from the outbox routing SMT (DBZ-2014); if needed, please configure this field explicitly as header or message value attribute; this was done to allow for exporting this field using any custom name which was not easily possible before

    • Improved start-up performance for the Postgres connector when using enum columns (DBZ-2038)
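
As referenced above, a minimal sketch of the new column whitelist; the table and column names are placeholders, and the property naming reflects the whitelist/blacklist convention of the 1.2 timeframe:

connector.class=io.debezium.connector.postgresql.PostgresConnector
# capture only the orders table...
table.whitelist=public.orders
# ...and only these fully-qualified columns within it
column.whitelist=public.orders.id,public.orders.order_date,public.orders.purchaser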

    Please refer to the release notes for the list of all addressed issues and upgrading procedures.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



Debezium 1.2.0.Beta2 Released

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    Debezium Server

    Adding the Debezium Server runtime is a major milestone for the project. It is a ready-to-use standalone application for executing Debezium connectors. With Debezium Server, users can now choose from three different ways of operating Debezium, matching their individual needs:

    • As plug-ins for Kafka Connect, ingesting data change events into an Apache Kafka cluster

    • Through the Debezium Engine, embedded as a library into bespoke JVM-based applications

    • Via Debezium Server, sending data change events to a growing number of messaging platforms like Kinesis

Which of these modes of execution you should use depends on your specific prerequisites, requirements and CDC use cases. Organizations running Apache Kafka that are interested in setting up no-code data integration pipelines leveraging a rich connector ecosystem should go for the Kafka Connect approach. In-application cache invalidation is a use case that benefits from the Debezium embedded engine. Debezium Server, finally, is meant for users who would like to take advantage of Debezium’s CDC functionality while using messaging platforms other than Apache Kafka. While you could have done so before by means of the embedded engine and a bit of bespoke Java programming, Debezium Server greatly simplifies this scenario.

Powered by the popular Quarkus stack, Debezium Server is a ready-made, configurable Java application which runs a Debezium connector and propagates the produced change events to consumers via a chosen sink adapter. Initially supporting Amazon Kinesis, the Debezium Server architecture is extensible, and other adapters, e.g. for Google Cloud Pub/Sub or Microsoft Azure Event Hubs, will follow soon. Through the Debezium Server extension API, you can also implement custom sink adapters for your preferred infrastructure for propagating change events to consumers.
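
To give a feel for the configuration surface, here is a hedged sketch of an application.properties for streaming Postgres changes to Kinesis; host names, credentials and the region are placeholders, and the exact property names should be checked against the Debezium Server documentation:

# sink side: where change events are sent
debezium.sink.type=kinesis
debezium.sink.kinesis.region=eu-central-1
# source side: a regular Debezium connector configuration, prefixed with debezium.source.
debezium.source.connector.class=io.debezium.connector.postgresql.PostgresConnector
debezium.source.offset.storage.file.filename=data/offsets.dat
debezium.source.offset.flush.interval.ms=0
debezium.source.database.hostname=postgres
debezium.source.database.port=5432
debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=inventory
debezium.source.database.server.name=tutorial
debezium.source.schema.whitelist=inventory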

Ultimately, Debezium Server is also a means of realizing our vision of CDC-as-a-Service, smoothly integrated with cloud-native infrastructure like Kubernetes and Knative. This release marks the first step of this endeavour, and we couldn’t be more excited about the prospect of working together with the Debezium community towards this goal.

    Stay tuned for more sink adapters, a container image, support for Knative Eventing, an operator for running Debezium Server on Kubernetes, and more!

    Other Features and Fixes

Besides Debezium Server, a few other improvements and fixes found their way into this release. A number of improvements were made to the different single message transforms (SMTs) coming with Debezium:

• The recently added SMTs for content-based change event filtering and routing can now be applied to a subset of topics (DBZ-2024)

    • Record headers and topic name are exposed to script expressions configured for these SMTs, so they can be evaluated by the filtering and routing logic (DBZ-2074)

• The logical topic routing SMT can optionally pass through message keys as-is, instead of enriching them with a source topic identifier (DBZ-2034); this is very helpful when uniqueness of keys is already ensured across the different re-routed topics, e.g. when routing change events from the partition tables of a partitioned Postgres table into a single topic (see the configuration sketch right after this list)
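
A sketch of that last item, collapsing the per-partition topics of a partitioned table into one logical topic while keeping the original keys; the regular expression and topic names are placeholders:

transforms=Reroute
transforms.Reroute.type=io.debezium.transforms.ByLogicalTableRouter
# route e.g. dbserver1.public.orders_part_01, _02, ... into a single logical topic
transforms.Reroute.topic.regex=(.*)orders_part_.*
transforms.Reroute.topic.replacement=$1orders
# keys are already unique across the partitions, so don't add the source-topic field to them
transforms.Reroute.key.enforce.uniqueness=false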

Debezium’s Testcontainers integration now allows the use of custom container images for Kafka Connect (DBZ-2070), which comes in handy if you want to leverage custom connectors, converters or SMTs in your integration tests. For the SQL Server connector it’s now optionally possible to skip the queries for obtaining LSN timestamps (DBZ-1988). This can help to significantly increase the throughput of the connector.

Several fixes relate to the MySQL DDL parser, e.g. due to additional DDL capabilities in MySQL 8.0.x (DBZ-2080, "Unable to parse MySQL ALTER statement with named primary key"; DBZ-2067, "Error and connector stops when DDL contains algorithm=instant") and when being used with MariaDB (DBZ-2062, "DDL statement throws error if compression keyword contains backticks (``)").

    As always, you can find the complete list of all addressed issues and upgrading procedures in the release notes.

    Many thanks to all the community members contributing to this release: Aaron Brady, Bingqin Zhou, Braden Groom, Fándly Gergő, Grant Cooksey, Joy Gao, Juan Antonio Pedraza, Max Kaplan, and Xuan Shen!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



Debezium 1.2.0.CR1 Released

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

• PostgreSQL can restrict the set of tables covered by a publication while using pgoutput (DBZ-1813); see the configuration sketch after this list.

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).
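
A hedged sketch for the first item in this list, limiting the auto-created publication to the captured tables only; the table names are placeholders, and the autocreate property name and value reflect my reading of DBZ-1813, so double-check them against the Postgres connector documentation:

connector.class=io.debezium.connector.postgresql.PostgresConnector
plugin.name=pgoutput
publication.name=dbz_publication
# create the publication only for the captured tables instead of FOR ALL TABLES
publication.autocreate.mode=filtered
table.whitelist=public.orders,public.customers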

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



Debezium 1.2.0.Final Released

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

• New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    Debezium Server

If I had to pick the one feature I’m most excited about, it would be Debezium Server. It allows even more users to benefit from open-source change data capture with Debezium, no matter which messaging infrastructure they are on. Being able to propagate Debezium data change events via managed services such as Kinesis and Pub/Sub was a feature request we received again and again from the community, and it’s amazing to see this feature finally become a reality.

    Debezium Server will also be a great foundation for exposing Debezium as a native Knative Eventing event source, and you can expect to see more exciting developments in this area very soon. With Debezium Server, there’s now three ways for running Debezium:

    • Via Kafka Connect, using its fantastic eco-system of connectors to set up low-latency data streaming pipelines with Apache Kafka

    • With Debezium Server, sending data change events to a growing number of messaging platforms like Kinesis

    • Through the Debezium Engine, embedded as a library into custom JVM-based applications, e.g. addressing use cases like updating embedded caches

I’m really excited about all the opportunities this brings to the Debezium community. Debezium Server is powered by the innovative Quarkus framework, which also opens up many interesting technical possibilities. E.g. we could explore running Debezium connectors as native binaries via GraalVM, resulting in a largely reduced memory consumption, which could make this a very interesting deployment option for cloud environments.

    A Big Thank You to the Community!

    Please refer to the original announcements (Alpha1, Beta1, Beta2, CR1) to learn more about all the new features of Debezium 1.2. You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

An open-source project would be nothing without its community of users and contributors; thanks a lot to the following people from the community who worked on the Debezium 1.2 release:

    This brings the total number of contributors to the main Debezium code repositories to over 200!

    Outlook

    Let’s close this post with an outlook to the next things to come. We’ll stick to our quarterly release cadence, i.e. you can expect Debezium 1.3 to be out by the end of September, with preview releases in between every three weeks.

We’re currently updating the roadmap for the next release, and your input and feedback on this will be very welcome! The things we’ve planned so far include more flexible options for snapshotting (re-snapshotting specific tables, filter changes, parallelized snapshotting, etc.), moving the Db2 connector out of incubating state, and an exploration of what it’d take to officially support MariaDB.

    Onwards and upwards!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       



Debezium 1.2.1.Final Released

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

• The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode (see the configuration sketch after this list); this mode should preferably be used, and for Debezium 1.3 we’re planning for it to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)
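
As a small illustration of the first item above, opting into the exported snapshot mode is a one-line switch in the Postgres connector configuration; the remaining connector settings are omitted in this sketch:

connector.class=io.debezium.connector.postgresql.PostgresConnector
# take the initial snapshot from the replication slot's exported snapshot,
# avoiding the race between snapshotting and streaming described above
snapshot.mode=exported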

    If you’re using the Apicurio open-source API and schema registry for managing the JSON and Avro schemas of your Debezium connectors, then things got a bit simpler for you: the Debezium container image for Kafka Connect comes with the required converters out of the box now (DBZ-2083).

    Overall, 34 issues were fixed for this release; please refer to the release notes for the full list of addressed issues, upgrade procedures, and notes on any backward compatibility changes.

    Many thanks to all the members from the Debezium community contributing to this release:

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.2.1.Final Released

    I am happy to announce the release of Debezium 1.2.1.Final!

This release includes several bug fixes for different Debezium connectors, and we highly recommend upgrading from 1.2.0.Final and earlier versions:

• The Debezium Postgres connector could miss events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshot mode (see the configuration sketch after this list). This mode is the preferred choice, and for Debezium 1.3 we’re planning to make it the basis for all the existing snapshot modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)
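
For illustration, here is a minimal, hypothetical excerpt of a Postgres connector configuration opting into the exported snapshot mode mentioned in the first fix above; the hostname and database name are placeholders, and the remaining connection settings are omitted:

connector.class=io.debezium.connector.postgresql.PostgresConnector
database.hostname=postgres
database.dbname=inventory
# Base the initial snapshot on an exported snapshot of the replication slot,
# avoiding the snapshot-to-streaming race described above
snapshot.mode=exported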

    If you’re using the Apicurio open-source API and schema registry for managing the JSON and Avro schemas of your Debezium connectors, then things got a bit simpler for you: the Debezium container image for Kafka Connect comes with the required converters out of the box now (DBZ-2083).
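
As a rough, illustrative sketch (not taken from the release itself) of what using those converters can look like, a connector might be pointed at an Apicurio registry as follows; the registry URL is a placeholder and exact property names may vary between Apicurio versions:

# Serialize change event values as Avro, with schemas stored in Apicurio
value.converter=io.apicurio.registry.utils.converter.AvroConverter
value.converter.apicurio.registry.url=http://apicurio:8080/api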

    Overall, 34 issues were fixed for this release; please refer to the release notes for the full list of addressed issues, upgrade procedures, and notes on any backward compatibility changes.

    Many thanks to all the members from the Debezium community contributing to this release:

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/07/28/hello-debezium/index.html b/blog/2020/07/28/hello-debezium/index.html index 41183a18df..6a3adfdce6 100644 --- a/blog/2020/07/28/hello-debezium/index.html +++ b/blog/2020/07/28/hello-debezium/index.html @@ -1 +1 @@ - Hello Debezium Team!

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 to capture clickstreams from the offshore datacenters into Kafka and aggregate them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

In 2014 I made my first OSS contributions to Composer, PHP’s dependency manager, and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I made my first contributions to Debezium, working on the MySQL snapshot process and fixing a MySQL TIME data type issue.

In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly with a JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurance, industrial sector, media). I did lots of networking at that time and learned how awesome the community around Kafka is. I was always quite sad that I didn’t have more time to focus on OSS projects.

Let me share a bit more of trivago’s journey to Kafka and Debezium. Back in 2015/2016 we introduced Kafka (in the version 0.9, 0.10 days) at trivago to handle the transport of our clickstream data from offshore datacenters (US + Asia) into our central datacenter (EU).

The first solution that we tried did its job, but getting messages in our desired format, which was Google Protocol Buffers / Protobuf, into Kafka was relatively hard. Furthermore, the codebase of that tool wasn’t very clean, and extending it was ugly and kind of hard.

With Kafka 0.9.0.0, Kafka Connect was introduced and it stabilized over the following months. And in winter 2016/2017 we discovered Debezium: a tool based on Kafka Connect, with a much cleaner codebase and an easy, extensible way to apply our requirements regarding Protobuf format and behaviour (with SMTs/Single Message Transforms, released in Kafka 0.10.2).

Since those "old times" Debezium has developed a lot, with many new connectors, and since July I’m part of the Debezium team. I am proud and excited to work on such a great OSS project, because there are still awesome things on Debezium’s roadmap: the new IBM Db2 connector will leave incubation state, Debezium Server will become easier to operate on Kubernetes, there is a PoC for a future Debezium management UI, and more is to come.

Furthermore, I’ve been dedicating my work to many things around OSS, Kafka and Debezium for years: I always advocated for using OSS, Kafka and Debezium, and supported people in different jobs and roles in introducing them or extending their usage. That’s why I’m really excited that I am now able to focus my work on supporting Debezium and the Debezium community!

    --René

    René Kerner

René is a software engineer at Red Hat. Before that, he worked as a software architect and engineer at trivago and as a consultant at Codecentric. Now he’s part of the Debezium team. He lives in Mönchengladbach, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Hello Debezium Team!

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 to capture clickstreams from the offshore datacenters into Kafka and aggregate them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

In 2014 I made my first OSS contributions to Composer, PHP’s dependency manager, and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I made my first contributions to Debezium, working on the MySQL snapshot process and fixing a MySQL TIME data type issue.

In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly with a JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurance, industrial sector, media). I did lots of networking at that time and learned how awesome the community around Kafka is. I was always quite sad that I didn’t have more time to focus on OSS projects.

Let me share a bit more of trivago’s journey to Kafka and Debezium. Back in 2015/2016 we introduced Kafka (in the version 0.9, 0.10 days) at trivago to handle the transport of our clickstream data from offshore datacenters (US + Asia) into our central datacenter (EU).

The first solution that we tried did its job, but getting messages in our desired format, which was Google Protocol Buffers / Protobuf, into Kafka was relatively hard. Furthermore, the codebase of that tool wasn’t very clean, and extending it was ugly and kind of hard.

With Kafka 0.9.0.0, Kafka Connect was introduced and it stabilized over the following months. And in winter 2016/2017 we discovered Debezium: a tool based on Kafka Connect, with a much cleaner codebase and an easy, extensible way to apply our requirements regarding Protobuf format and behaviour (with SMTs/Single Message Transforms, released in Kafka 0.10.2).

Since those "old times" Debezium has developed a lot, with many new connectors, and since July I’m part of the Debezium team. I am proud and excited to work on such a great OSS project, because there are still awesome things on Debezium’s roadmap: the new IBM Db2 connector will leave incubation state, Debezium Server will become easier to operate on Kubernetes, there is a PoC for a future Debezium management UI, and more is to come.

Furthermore, I’ve been dedicating my work to many things around OSS, Kafka and Debezium for years: I always advocated for using OSS, Kafka and Debezium, and supported people in different jobs and roles in introducing them or extending their usage. That’s why I’m really excited that I am now able to focus my work on supporting Debezium and the Debezium community!

    --René

    René Kerner

René is a software engineer at Red Hat. Before that, he worked as a software architect and engineer at trivago and as a consultant at Codecentric. Now he’s part of the Debezium team. He lives in Mönchengladbach, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/08/06/debezium-1-3-alpha1-released/index.html b/blog/2020/08/06/debezium-1-3-alpha1-released/index.html index 48d208c105..718aa43afe 100644 --- a/blog/2020/08/06/debezium-1-3-alpha1-released/index.html +++ b/blog/2020/08/06/debezium-1-3-alpha1-released/index.html @@ -1 +1 @@ - Debezium 1.3.0.Alpha1 Released

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Azure Event Hubs sink adapter

    Debezium’s standalone server is one of the newest features in the Debezium ecosystem. The standalone server provides a ready-to-use application that can stream change events from a source database to a variety of messaging infrastructures.

    Driven by the community, the Debezium Server now supports Azure Event Hubs (DBZ-2282). This now enables the Debezium Server to stream change events to Amazon Kinesis, Apache Pulsar, Google Cloud Pub/Sub, and Azure Event Hubs.

    Additional MongoDB connection options

The Debezium MongoDB connector has traditionally used the driver’s default connection options and timeouts. There are use cases where customization of these defaults is necessary to address latency or performance concerns in your deployment.

Three additional configuration options are now available for MongoDB:

    mongodb.connect.timeout.ms
The number of milliseconds the driver waits for a new connection before the attempt is aborted.
    Defaults to 10000.

    mongodb.server.selection.timeout.ms
    The number of milliseconds the driver will wait to select a server before it times out, throwing an error.
    Defaults to 30000.

    mongodb.socket.timeout.ms
The number of milliseconds the driver waits before a send/receive on the socket may time out.
    A value of 0 disables this behavior. Defaults to 0.

    Other Features

Besides these key features, there are a number of other new features coming with the 1.3.0.Alpha1 release:

    • New SQL Server snapshot mode initial_only (DBZ-2379)

    • Postgres and possibly other DB connections are not properly shutdown when the task encounters thread interrupt (DBZ-2133)

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Ignore non-existing table reported on Aurora via SHOW TABLES (DBZ-1939)

    • Cassandra connector not getting events (DBZ-2086)

    • PubSub Sink sends empty records (DBZ-2277)

    • Skipping LSN is inefficient and does not forward slot position (DBZ-2310)

    • message size is at least 68x larger for changes with bit varying columns (DBZ-2315)

• Change events lost when connector is restarted while processing transaction with PK update (DBZ-2329)

    • Error when processing commitLogs related to list-type columns (DBZ-2345)

    • Fix dependency groupId on Outbox Quarkus Extension documentation (DBZ-2367)

    • Cannot detect Azure Sql Version (DBZ-2373)

    • ParallelSnapshotReader sometimes throws NPE (DBZ-2387)

    Please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    A big thank you to all the contributors from the community who worked on this release: Abhishek Gupta, Cory Harper, Arik Cohen, Moira Tagle, Victor Xiang, Grzegorz Kołakowski, Björn Häuser, Lukasz Korzeniowski, and Jonas Lins!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.3.0.Alpha1 Released

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Azure Event Hubs sink adapter

    Debezium’s standalone server is one of the newest features in the Debezium ecosystem. The standalone server provides a ready-to-use application that can stream change events from a source database to a variety of messaging infrastructures.

    Driven by the community, the Debezium Server now supports Azure Event Hubs (DBZ-2282). This now enables the Debezium Server to stream change events to Amazon Kinesis, Apache Pulsar, Google Cloud Pub/Sub, and Azure Event Hubs.

    Additional MongoDB connection options

The Debezium MongoDB connector has traditionally used the driver’s default connection options and timeouts. There are use cases where customization of these defaults is necessary to address latency or performance concerns in your deployment.

Three additional configuration options are now available for MongoDB:

    mongodb.connect.timeout.ms
The number of milliseconds the driver waits for a new connection before the attempt is aborted.
    Defaults to 10000.

    mongodb.server.selection.timeout.ms
    The number of milliseconds the driver will wait to select a server before it times out, throwing an error.
    Defaults to 30000.

    mongodb.socket.timeout.ms
The number of milliseconds the driver waits before a send/receive on the socket may time out.
    A value of 0 disables this behavior. Defaults to 0.
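
As an illustration only (not part of the release announcement), these options could be set in a MongoDB connector configuration like the following excerpt; the host and logical server name are placeholders:

connector.class=io.debezium.connector.mongodb.MongoDbConnector
mongodb.hosts=rs0/mongodb:27017
mongodb.name=dbserver1
# Abort connection attempts after 5 s instead of the 10 s default
mongodb.connect.timeout.ms=5000
# Fail server selection after 15 s instead of the 30 s default
mongodb.server.selection.timeout.ms=15000
# Time out socket reads/writes after 60 s (0, the default, disables this)
mongodb.socket.timeout.ms=60000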

    Other Features

Besides these key features, there are a number of other new features coming with the 1.3.0.Alpha1 release:

• New SQL Server snapshot mode initial_only (DBZ-2379); see the configuration sketch after this list

    • Postgres and possibly other DB connections are not properly shutdown when the task encounters thread interrupt (DBZ-2133)
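
The following is a minimal, hypothetical excerpt showing how the new snapshot mode might be selected for a SQL Server connector; all values other than snapshot.mode are placeholders and the remaining connection settings are omitted:

connector.class=io.debezium.connector.sqlserver.SqlServerConnector
database.hostname=sqlserver
database.dbname=testDB
# Take the initial snapshot of the captured tables, then stop,
# without streaming subsequent changes from the transaction log
snapshot.mode=initial_only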

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Ignore non-existing table reported on Aurora via SHOW TABLES (DBZ-1939)

    • Cassandra connector not getting events (DBZ-2086)

    • PubSub Sink sends empty records (DBZ-2277)

    • Skipping LSN is inefficient and does not forward slot position (DBZ-2310)

    • message size is at least 68x larger for changes with bit varying columns (DBZ-2315)

• Change events lost when connector is restarted while processing transaction with PK update (DBZ-2329)

    • Error when processing commitLogs related to list-type columns (DBZ-2345)

    • Fix dependency groupId on Outbox Quarkus Extension documentation (DBZ-2367)

    • Cannot detect Azure Sql Version (DBZ-2373)

    • ParallelSnapshotReader sometimes throws NPE (DBZ-2387)

    Please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    A big thank you to all the contributors from the community who worked on this release: Abhishek Gupta, Cory Harper, Arik Cohen, Moira Tagle, Victor Xiang, Grzegorz Kołakowski, Björn Häuser, Lukasz Korzeniowski, and Jonas Lins!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/09/03/debezium-1-3-beta1-released/index.html b/blog/2020/09/03/debezium-1-3-beta1-released/index.html index 01a23e490b..db9071cf32 100644 --- a/blog/2020/09/03/debezium-1-3-beta1-released/index.html +++ b/blog/2020/09/03/debezium-1-3-beta1-released/index.html @@ -1 +1 @@ - Debezium 1.3.0.Beta1 Released

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    Revised Filter Options and Documentation Wording

Since the beginning of the Debezium project, there has been support for specifying the tables and columns to capture. This is done via a range of configuration options such as schema.whitelist, column.blacklist, etc.

    While nothing was wrong with those options from a technical perspective, we’ve come to realize that the terms "whitelist" and "blacklist" are problematic and that they may even be hurtful to some members of our community. This is why we’ve decided to deprecate the existing option names and replace them with counterparts which are not only more inclusive, but also more expressive when it comes to describing their purpose.

    The following changes have been made:

Old Name            New Name
database.whitelist  database.include.list
database.blacklist  database.exclude.list
schema.whitelist    schema.include.list
schema.blacklist    schema.exclude.list
table.whitelist     table.include.list
table.blacklist     table.exclude.list
column.whitelist    column.include.list
column.blacklist    column.exclude.list

The renaming has been done for all the stable Debezium connectors as of this release; the options of the incubating connectors (Oracle, Db2, Cassandra) will be renamed in the next Debezium 1.3.x preview release. Note that for the sake of backwards compatibility, the old option names can still be used during a transition period. In that case, e.g. when upgrading an existing connector instance to the new version, a warning will be logged upon connector start-up, and you should update your configuration accordingly.

    Besides renaming these filter options, we’ve also updated our documentation; in particular the description of supported database topologies has been updated from the previously used terms "master" and "slave" to "primary" (node) and "replica" (node).

    This change is part of a larger effort across Red Hat and the industry as a whole, and we’re very happy that we can contribute our share for making the world of open-source projects and its communities more welcoming and inclusive.

    Bug Fixes

    This release fixes a number of critical bugs:

    • Potentially lost change events with the Postgres connector, in case of connector restarts (DBZ-2338, DBZ-2397)

    • NullPointerException in the logical table router (DBZ-2412)

    • Snapshot fails if table or schema contain hyphens (DBZ-2452)

    • Misc. MySQL DDL parser fixes (DBZ-2413, DBZ-2415, DBZ-2425)

    Altogether, 20 issues were fixed for this release.

    Please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Many thanks to community members Bingqin Zhou and Ruslan Gibaiev for their contributions to this release!

    Outlook

As you’d expect, things went a bit more slowly during the summer, with several folks taking some well-deserved time off. Now that everyone is back, Debezium development is moving forward at full steam again, and you can expect some exciting new features soon: the ongoing work by the community towards a LogMiner-based implementation for Oracle should soon reach a state where it can be merged into the upstream Debezium repository.

    And a brand-new connector contributed by the community is showing up on the horizon, too; Bolt engineers Kewei Shang and Ruslan Gibaiev have been working on a CDC connector for the Vitess database and announced that they’d like to open-source and continue to evolve it under the Debezium umbrella.

    Exciting times for open-source change data capture and Debezium 🎉!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.3.0.Beta1 Released

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    Revised Filter Options and Documentation Wording

Since the beginning of the Debezium project, there has been support for specifying the tables and columns to capture. This is done via a range of configuration options such as schema.whitelist, column.blacklist, etc.

    While nothing was wrong with those options from a technical perspective, we’ve come to realize that the terms "whitelist" and "blacklist" are problematic and that they may even be hurtful to some members of our community. This is why we’ve decided to deprecate the existing option names and replace them with counterparts which are not only more inclusive, but also more expressive when it comes to describing their purpose.

    The following changes have been made:

Old Name            New Name
database.whitelist  database.include.list
database.blacklist  database.exclude.list
schema.whitelist    schema.include.list
schema.blacklist    schema.exclude.list
table.whitelist     table.include.list
table.blacklist     table.exclude.list
column.whitelist    column.include.list
column.blacklist    column.exclude.list

The renaming has been done for all the stable Debezium connectors as of this release; the options of the incubating connectors (Oracle, Db2, Cassandra) will be renamed in the next Debezium 1.3.x preview release. Note that for the sake of backwards compatibility, the old option names can still be used during a transition period. In that case, e.g. when upgrading an existing connector instance to the new version, a warning will be logged upon connector start-up, and you should update your configuration accordingly.
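
To make the transition concrete, here is a hypothetical table filter shown with the deprecated and the new option name (the table names are made up):

# Old style; deprecated, still accepted during the transition period,
# but a warning is logged at connector start-up
table.whitelist=inventory.customers,inventory.orders

# New style, preferred as of this release
table.include.list=inventory.customers,inventory.orders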

    Besides renaming these filter options, we’ve also updated our documentation; in particular the description of supported database topologies has been updated from the previously used terms "master" and "slave" to "primary" (node) and "replica" (node).

    This change is part of a larger effort across Red Hat and the industry as a whole, and we’re very happy that we can contribute our share for making the world of open-source projects and its communities more welcoming and inclusive.

    Bug Fixes

    This release fixes a number of critical bugs:

    • Potentially lost change events with the Postgres connector, in case of connector restarts (DBZ-2338, DBZ-2397)

    • NullPointerException in the logical table router (DBZ-2412)

    • Snapshot fails if table or schema contain hyphens (DBZ-2452)

    • Misc. MySQL DDL parser fixes (DBZ-2413, DBZ-2415, DBZ-2425)

    Altogether, 20 issues were fixed for this release.

    Please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Many thanks to community members Bingqin Zhou and Ruslan Gibaiev for their contributions to this release!

    Outlook

As you’d expect, things went a bit more slowly during the summer, with several folks taking some well-deserved time off. Now that everyone is back, Debezium development is moving forward at full steam again, and you can expect some exciting new features soon: the ongoing work by the community towards a LogMiner-based implementation for Oracle should soon reach a state where it can be merged into the upstream Debezium repository.

    And a brand-new connector contributed by the community is showing up on the horizon, too; Bolt engineers Kewei Shang and Ruslan Gibaiev have been working on a CDC connector for the Vitess database and announced that they’d like to open-source and continue to evolve it under the Debezium umbrella.

    Exciting times for open-source change data capture and Debezium 🎉!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/09/15/debezium-auto-create-topics/index.html b/blog/2020/09/15/debezium-auto-create-topics/index.html index e62a5ba5e1..53bb6b08aa 100644 --- a/blog/2020/09/15/debezium-auto-create-topics/index.html +++ b/blog/2020/09/15/debezium-auto-create-topics/index.html @@ -74,4 +74,4 @@ $ kafka-topics.sh --bootstrap-server $HOSTNAME:9092 --describe --topic dbserver1.inventory.orders Topic: dbserver1.inventory.orders PartitionCount: 10 ReplicationFactor: 3 -Configs: compression.type=lz4,cleanup.policy=compact,segment.bytes=1073741824,delete.retention.ms=2592000000

    Conclusion

In many environments, especially in production, we often don’t want topic auto-creation to be enabled on the Kafka broker side, or we need a different configuration than the default topic config.
Prior to Kafka 2.6 this was only possible by manually creating topics upfront or via some custom setup process, maybe during deployment.

Since Kafka 2.6, Kafka Connect comes with built-in topic creation for connector topics, and this article shows how to use it with Debezium.

    You can find an example here in the Debezium examples repository on GitHub.

    René Kerner

René is a software engineer at Red Hat. Before that, he worked as a software architect and engineer at trivago and as a consultant at Codecentric. Now he’s part of the Debezium team. He lives in Mönchengladbach, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +Configs: compression.type=lz4,cleanup.policy=compact,segment.bytes=1073741824,delete.retention.ms=2592000000

    Conclusion

In many environments, especially in production, we often don’t want topic auto-creation to be enabled on the Kafka broker side, or we need a different configuration than the default topic config.
Prior to Kafka 2.6 this was only possible by manually creating topics upfront or via some custom setup process, maybe during deployment.

Since Kafka 2.6, Kafka Connect comes with built-in topic creation for connector topics, and this article shows how to use it with Debezium.
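
As a sketch of what this can look like in a Debezium connector configuration (assuming topic.creation.enable=true on the Connect worker, which is the default; the values below simply mirror the example output above):

# Default settings applied to topics created for this connector
topic.creation.default.replication.factor=3
topic.creation.default.partitions=10
topic.creation.default.compression.type=lz4
topic.creation.default.cleanup.policy=compact
topic.creation.default.delete.retention.ms=2592000000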

    You can find an example here in the Debezium examples repository on GitHub.

    René Kerner

René is a software engineer at Red Hat. Before that, he worked as a software architect and engineer at trivago and as a consultant at Codecentric. Now he’s part of the Debezium team. He lives in Mönchengladbach, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/09/16/debezium-1-3-beta2-released/index.html b/blog/2020/09/16/debezium-1-3-beta2-released/index.html index 3bbbc916bf..557b7eda4c 100644 --- a/blog/2020/09/16/debezium-1-3-beta2-released/index.html +++ b/blog/2020/09/16/debezium-1-3-beta2-released/index.html @@ -1 +1 @@ - Debezium 1.3.0.Beta2 Released

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, no fewer than 44 issues have been resolved for this release.

    Column Filtering Improvements

Addressing a long-standing feature request (DBZ-1068), the Debezium connector for SQL Server now supports server-side column filtering: capture instances in the database itself can be configured so as to only contain a subset of the captured table’s columns. That way, specific columns can be excluded by the CDC process right away, instead of only being removed in the Debezium connector, which is much more efficient for large BLOB columns, for instance.

    The MySQL connector adds support for the column.include.list option already known from the Debezium Postgres connector (DBZ-2508).

Related to the matter of filtering, and following up on the work begun in the 1.3 Beta1 release around replacing the terms "master/slave", "blacklist" and "whitelist" with more inclusive alternatives, all the incubating connectors (Oracle, Db2, Cassandra) now also use the new terms like "database.include.list", "primary/replica", etc. (DBZ-2462).

    LogMiner-based Ingestion Engine for Oracle

    The Debezium Oracle connector can now use the LogMiner package to ingest change events. As this package comes with the Oracle database itself, it’s a very attractive alternative to the existing XStream-based implementation. Discussions and work towards LogMiner support have been happening in the Debezium community for a long time (as you already might have guessed from the very low issue number DBZ-137), so we’re particularly excited about this work being merged eventually and being part of this release.

Note that there are several follow-up tasks to be resolved for the LogMiner-based ingestion implementation; while it is not recommended for production usage at this point, we’d love to get your feedback from testing and evaluating it!

    A massive thank you to everyone involved with this: Andrey Ignatenko and his team for the main work, Andrey Pustovetov for his ideas around transaction buffering, Chris Cranford for picking up the PR and preparing it to get merged, Milo vd Zee for his extensive review, as well as everyone else commenting and providing feedback on the PR and Jira issue.

    Misc. Features and Bug Fixes

    In addition to these key features, the community has completed the work on some other features and fixes, too:

    • The MySQL connector supports the LOCK TABLES FOR BACKUP lock mode when being used with Percona Server for MySQL (DBZ-2466), which reduces contention during snapshots

• The Postgres connector snapshot SPI has become more flexible, now allowing for custom implementations that can, e.g., re-snapshot selected tables (DBZ-2094)

• The prefix of the additional headers and fields produced by the event flattening SMTs is now customizable (DBZ-2504)

    • Support for JSON functions in MySQL DDL statements (DBZ-2453)

    • Improved exception logging for the Cassandra connector (DBZ-2498)

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Thank you so much to all the community members contributing to this release: Aaron Brady, Andrey Ignatenko, Bingqin Zhou, Eric Weaver, Grant Cooksey, Grzegorz Kołakowski, Guy Pascarella, James Gormley, Jos Huiting, Mickaël Isaert, and Nathan Mills.

    Outlook

    With the first cut of LogMiner support being merged and released, we’re now planning to focus on stabilization and bug fixing, with the Debezium 1.3 Final release to be expected around the end of the month.

In parallel, work is happening on a new community-contributed connector for the Vitess database (which will - depending on the progress of the review - be released as an incubating connector in either Debezium 1.3 or 1.4), and we’re going to share some exciting efforts around a proof-of-concept for a potential future Debezium UI with you very soon!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.3.0.Beta2 Released

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, no fewer than 44 issues have been resolved for this release.

    Column Filtering Improvements

Addressing a long-standing feature request (DBZ-1068), the Debezium connector for SQL Server now supports server-side column filtering: capture instances in the database itself can be configured so as to only contain a subset of the captured table’s columns. That way, specific columns can be excluded by the CDC process right away, instead of only being removed in the Debezium connector, which is much more efficient for large BLOB columns, for instance.

    The MySQL connector adds support for the column.include.list option already known from the Debezium Postgres connector (DBZ-2508).
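
For example (an illustrative excerpt with made-up table and column names), the MySQL connector can now be told to only capture selected columns like so:

# Capture only the id and email columns of the inventory.customers table;
# all other columns of that table are omitted from change events
column.include.list=inventory.customers.id,inventory.customers.email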

Related to the matter of filtering, and following up on the work begun in the 1.3 Beta1 release around replacing the terms "master/slave", "blacklist" and "whitelist" with more inclusive alternatives, all the incubating connectors (Oracle, Db2, Cassandra) now also use the new terms like "database.include.list", "primary/replica", etc. (DBZ-2462).

    LogMiner-based Ingestion Engine for Oracle

    The Debezium Oracle connector can now use the LogMiner package to ingest change events. As this package comes with the Oracle database itself, it’s a very attractive alternative to the existing XStream-based implementation. Discussions and work towards LogMiner support have been happening in the Debezium community for a long time (as you already might have guessed from the very low issue number DBZ-137), so we’re particularly excited about this work being merged eventually and being part of this release.

Note that there are several follow-up tasks to be resolved for the LogMiner-based ingestion implementation; while it is not recommended for production usage at this point, we’d love to get your feedback from testing and evaluating it!

    A massive thank you to everyone involved with this: Andrey Ignatenko and his team for the main work, Andrey Pustovetov for his ideas around transaction buffering, Chris Cranford for picking up the PR and preparing it to get merged, Milo vd Zee for his extensive review, as well as everyone else commenting and providing feedback on the PR and Jira issue.

    Misc. Features and Bug Fixes

    In addition to these key features, the community has completed the work on some other features and fixes, too:

    • The MySQL connector supports the LOCK TABLES FOR BACKUP lock mode when being used with Percona Server for MySQL (DBZ-2466), which reduces contention during snapshots

• The Postgres connector snapshot SPI has become more flexible, now allowing for custom implementations that can, e.g., re-snapshot selected tables (DBZ-2094)

• The prefix of the additional headers and fields produced by the event flattening SMTs is now customizable (DBZ-2504)

    • Support for JSON functions in MySQL DDL statements (DBZ-2453)

    • Improved exception logging for the Cassandra connector (DBZ-2498)

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Thank you so much to all the community members contributing to this release: Aaron Brady, Andrey Ignatenko, Bingqin Zhou, Eric Weaver, Grant Cooksey, Grzegorz Kołakowski, Guy Pascarella, James Gormley, Jos Huiting, Mickaël Isaert, and Nathan Mills.

    Outlook

    With the first cut of LogMiner support being merged and released, we’re now planning to focus on stabilization and bug fixing, with the Debezium 1.3 Final release to be expected around the end of the month.

In parallel, work is happening on a new community-contributed connector for the Vitess database (which will - depending on the progress of the review - be released as an incubating connector in either Debezium 1.3 or 1.4), and we’re going to share some exciting efforts around a proof-of-concept for a potential future Debezium UI with you very soon!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/09/24/debezium-1-3-cr1-released/index.html b/blog/2020/09/24/debezium-1-3-cr1-released/index.html index a3834fabac..79dfc0bfc1 100644 --- a/blog/2020/09/24/debezium-1-3-cr1-released/index.html +++ b/blog/2020/09/24/debezium-1-3-cr1-released/index.html @@ -1,3 +1,3 @@ Debezium 1.3.0.CR1 Released

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and to introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, no fewer than 15 issues have been resolved for this release.

    ByteBufferConverter improvements

    The ByteBufferConverter is a converter that is used with the Outbox event router SMT to serialize an existing Avro payload column. In a recent report (DBZ-2396), the ByteBufferConverter was unable to serialize events emitted from a connector that was configured to emit heartbeat, transaction metadata, or schema change events. In order to improve the converter’s compatibility when these events are emitted, the ByteBufferConverter can now be configured to delegate event serialization to an additional converter. This delegation is necessary so that heartbeat, transaction metadata, and schema change events (if applicable) can be serialized.

    In order to use the Outbox event router SMT and the ByteBufferConverter with these event types, the connector configuration must be changed to reflect the delegate converter and its configurable options.

As an example, to use the Apache Kafka JsonConverter as a delegate with schemas disabled, the following configuration would need to be included in the connector:

    value.converter=io.debezium.converters.ByteBufferConverter
     value.converter.delegate.converter.type=org.apache.kafka.connect.json.JsonConverter
    -value.converter.delegate.converter.type.schemas.enable=false

    For more information about using the ByteBufferConverter, please see the Using Avro as the payload format section in the Outbox event router documentation.

    Scripting module

In this release, the SMTs for content-based routing and filtering that both use JSR 223 scripting engines have been moved out of debezium-core and into a separate artifact, debezium-scripting (DBZ-2549). Any connector that previously used these SMTs requires the new artifact to be added to that connector’s plug-in directory.

When using the Debezium container image for Kafka Connect, set the environment variable ENABLE_DEBEZIUM_SCRIPTING to true to enable this feature. This change was made so that the scripting functionality is only available in environments with an appropriately secured Kafka Connect configuration interface.

    Misc. Features and Bug Fixes

    In addition, the community has completed the work on some other features and fixes, too:

    • Catch up streaming before snapshot may duplicate messages upon resuming streaming DBZ-2550

    • Fix Quarkus datasource configuration for Quarkus 1.9 DBZ-2558

    • Implement connection retry support for Oracle DBZ-2531

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Thank you so much to Guillaume Smet and Grant Cooksey for their contributions to this release.

    Outlook

Barring any unforeseen regressions and bug reports, Debezium 1.3 Final should be out next week. Until then, we’ll focus on some more polishing. The community-led work towards a Debezium connector for Vitess is also making good progress, with an initial release of this new connector planned for Debezium 1.4 Alpha1 in late October.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +value.converter.delegate.converter.type.schemas.enable=false

    For more information about using the ByteBufferConverter, please see the Using Avro as the payload format section in the Outbox event router documentation.

    Scripting module

In this release, the SMTs for content-based routing and filtering that both use JSR 223 scripting engines have been moved out of debezium-core and into a separate artifact, debezium-scripting (DBZ-2549). Any connector that previously used these SMTs requires the new artifact to be added to that connector’s plug-in directory.

When using the Debezium container image for Kafka Connect, set the environment variable ENABLE_DEBEZIUM_SCRIPTING to true to enable this feature. This change was made so that the scripting functionality is only available in environments with an appropriately secured Kafka Connect configuration interface.
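
For instance, a filtering SMT based on the new module might be configured roughly as follows (an illustrative sketch using a Groovy expression; it assumes the debezium-scripting artifact and a JSR 223 Groovy implementation are available on the connector's plug-in path):

transforms=filter
transforms.filter.type=io.debezium.transforms.Filter
transforms.filter.language=jsr223.groovy
# Drop delete events, propagate everything else (illustrative condition)
transforms.filter.condition=value.op != 'd'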

    Misc. Features and Bug Fixes

    In addition, the community has completed the work on some other features and fixes, too:

    • Catch up streaming before snapshot may duplicate messages upon resuming streaming DBZ-2550

    • Fix Quarkus datasource configuration for Quarkus 1.9 DBZ-2558

    • Implement connection retry support for Oracle DBZ-2531

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    Thank you so much to Guillaume Smet and Grant Cooksey for their contributions to this release.

    Outlook

    Barring any unforeseen regressions and bug reports, Debezium 1.3 Final should be out next week. Until then, we’ll focus on some more polishing. The community-led work towards a Debezium connector for Vitess is also making good progress, with an initial release of this new connector planned with Debezium 1.4 Alpha1 in late October.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    Debezium 1.3.0.Final Released

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only (see the short configuration sketch after this list)

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format
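
    As a quick, hedged illustration of the new SQL Server snapshot mode (connector class and property value as described in the SQL Server connector documentation; this is not a complete configuration):

    connector.class=io.debezium.connector.sqlserver.SqlServerConnector
    # take an initial snapshot of the captured tables and then stop,
    # instead of continuing to stream changes from the transaction log afterwards
    snapshot.mode=initial_only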

    Please refer to the announcements of the preview releases (Alpha1, Beta1, Beta2, CR1) for more details. Since last week’s CR1 release, we’ve been focusing on ironing out some remaining bugs and improvements to the documentation. To learn more about procedures for upgrading from earlier Debezium versions, please take a look at the release notes.

    Thank you to everyone testing the preview releases; this is invaluable help for spotting and fixing shortcomings in new features as well as regressions. And of course I’d also like to thank all the community members contributing to this release: Aaron Brady, Abhishek Gupta, Andrey Ignatenko, Arik Cohen, Bingqin Zhou, Björn Häuser, Cory Harper, Denis Liseichykau, Eric Weaver, Grant Cooksey, Grzegorz Kołakowski, Guillaume Smet, Guy Pascarella, James Gormley, Jeremy Finzel, Jonas Lins, Jos Huiting, Justin Hiza, Lukasz Korzeniowski, Luis Garcés-Erice, Matt Beary, Mickaël Isaert, Moira Tagle, Nathan Mills, Peter Junos, Ruslan Gibaiev, Thomas Prelle, and Victor Xiang!

    Overall, more than 220 individuals have contributed to the Debezium project at this point.

    But not only that: the number of Debezium users is also constantly growing, as documented e.g. on our reference list of Debezium users (let us know if you want to be added). There are also several new entries in our compilation of public talks and blog posts touching on Debezium, e.g. a highly recommendable talk by Marta Paes about change data capture with Flink SQL and Debezium, a blog post by Cemal Turkoglu about making sense of change data capture pipelines for Postgres with the Debezium Kafka Connector, and a nice piece on implementing the outbox pattern with Debezium in Quarkus by Iain Porter. Abdellatif Bouchama did an amazing job by creating a cheat sheet for running Debezium on OpenShift.

    Outlook

    With the 1.3 Final release out, planning for the 1.4 version (due by the end of the year) is happening right now. The roadmap is still in flux, so make sure to chime in and let us know about your requirements and feature requests. Some of the things we’re likely going to work on include:

    • The community-led connector for Vitess; the initial contribution has already been merged and we plan to ship the first release of this as part of Debezium 1.4 Alpha1 later this month

    • Moving the MySQL connector to the CDC connector framework shared by most other Debezium connectors; this will drastically reduce maintenance burden of this connector in the future

    • Exploring more powerful snapshotting options (e.g. for parallelization and re-doing snapshots of selected tables)

    • Improving the new LogMiner-based implementation for Oracle

    And lastly, there’s one other area of activity which I’m particularly excited to share here today for the first time: a proof-of-concept of what a potential future Debezium user interface might look like. In that PoC we’re exploring how a graphical UI could help with the set-up and operation of Debezium connectors. We’ve got quite a few ideas in that field and will share more details in a blog post very soon. If you feel adventurous in the meantime, you could grab the current PoC code and take it for a spin!

    Until then, happy change data streaming, onwards and upwards!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    Debezium Community Stories With... Renato Mefi

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    Renato, could you introduce yourself? What is your job, if you’re not contributing to Debezium?

    Hello all, I’m Renato and my first Debezium commit was on Nov 12, 2018, it’s been a long and fun ride so far, and I’m glad to have the opportunity to share my story here with you all!

    I’m a Staff Software Engineer at SurveyMonkey in Amsterdam, The Netherlands, within the Platform team for our CX (customer experience) suite, if you’re curious about what that is, you can check it out here.

    On the internet you’re going to find me talking about Docker, Debezium, Kafka, Microservices and other things that I enjoy. Although those amazing engineering pieces really excite me, at this moment I’m also really passionate about Platform Engineering teams and how they can operate in an organization, the stories I’m going to tell below represent my view of it, of how critical the role of a platform team can be when adopting new technologies and solving difficult problems for the whole, in this case powered by Debezium!

    What are your use cases for Debezium and CDC in your current project?

    It’s a long and enjoyable story (long in terms of the internet), we’ve been using Debezium since Q4 2018, it’s been 2 years at the moment I’m writing those answers here.

    When I classify Debezium within our product, I say it is an architectural component, the idea behind this is to position it as a platform/infrastructure concern, in a way that it can reach multiple parts of the stack and services. I consider this abstraction of Debezium one of the key success factors it had in its adoption and growth within our platform, let me explain this better!

    Our first use case is likely to be one of the most common ones for CDC, the strangler pattern, which for us came before Debezium; so let me tell this part of the story first: when I joined Usabilla (later acquired by SurveyMonkey), there was already an effort to move our platform to a new architecture and the strangler pattern was already there. When the first couple of services started to grow, their primary way to bring data out of legacy was to poll the database, and needless to say, this could go very wrong! Our legacy database is a MongoDB cluster, and since I was concerned about the polling approach, I started to dig into possibilities. I was hoping to find something like a streaming API for it, but what I ended up encountering was the database changelog (write-ahead logging, or the "oplog" as it’s called in Mongo)!

    It came to my mind right away: "Oh, I could write something that queries the data from the oplog and sends it to Kafka". So I checked with our in-house Senior SRE and MongoDB expert Gijs Kunze who thought it could be a good idea; as a next step I went to talk to my colleague Rafael Dohms, and we decided to do some extra Googling, and like that, we found Debezium! It was the perfect match to our needs and better than what we could have written by ourselves!

    Now back to our use case: what makes it an architecture component for us is basically the approach, we abstracted and wrapped Debezium in a project called Legacy Data Syncer (LDS for us, because acronyms never get old). Although it might look simple to spin up a Kafka Connect cluster with Debezium, running it production-ready, monitoring multiple collections within the database, exposing metrics, doing transformations and more, is not such an easy task. So how does it work? Every time an engineering team needs to capture data from our legacy system, to start strangling a feature, they only have to do two things: open a pull request which literally adds one line to LDS, and create their Kafka consumer!

    Figure 1. The configuration file in LDS; a developer will open a PR adding a new line, the rest will be taken care of.

    Upon merging the PR, our project will provision the whole configuration to Kafka Connect; it ensures the snapshot is executed, metrics are present, and so on. We’ve done the same thing for the outbox pattern and I talk a little bit more about it in this tweet thread.

    Self-servicing the teams was a great way to remove resistance to adoption: no Jira tickets were necessary, no advanced ops knowledge or anything else to get it running. The other factors I consider to have contributed to Debezium’s success in our platform are its reliability and straightforward value perception; in those two years we never had major outages or critical problems of any kind!

    You mention the outbox pattern; Could you tell more about why and how you’re using this?

    Absolutely! One more time, it’s crazy how CDC and Debezium can simplify some of the most critical architectural parts of big platforms! One year after using Debezium in the core of our architecture migration, we had another problem on our hands: how to reliably write data to our new source-of-truth databases and propagate messages to Kafka at the same time. Although it seems simple to answer and to find a solution, each of the possible solutions comes with a major drawback.

    Which solutions do we have?

    • Embrace eventual consistency to its peak by adopting event sourcing, by writing first to Kafka and reading our own writes; the drawbacks here are extra complexity and intensified eventual consistency

    • Dual writes, well, actually this is not an option, because as you know, "Friends Don’t Let Friends Do Dual Writes"!

    • Different approaches of distributed transactions like 2PC and sagas; the costs here are performance and engineering effort, now every service we have has to either become a transaction coordinator or have rollback capabilities, also the cascade effect scared us quite a bit!

    Well, what’s left? We found that outbox was the right answer for us, but before we get there, let me get into the cost x benefit equation of our decision making!

    Although some of the options were quite attractive technically, for instance event sourcing, the engineering effort and growth is immense. Also, it’s not the kind of thing which comes ready to use, and there’s a lot of discovery to be made along the way, so what were the constraints and desires:

    • Reliability; we want at least once semantics, exactly once isn’t necessary as we can uniquely identify each message/event;

    • Eventual consistency only between services, but not within the services themselves. Being able to interact with a service which is the source of truth of a certain model, and get an immediate answer is not just handy, but incredibly powerful (and that’s why monoliths are also so attractive);

    • Avoiding distributed transactions as much as we can, it’s scary and we should be scared about it too!

    • Manageable effort; how can we "easily" get 30+ engineers to adopt a solution for this problem? At the same time, how can you ensure the implementation guarantees among every service and team?

    We realized that the outbox pattern would help us meet those requirements: applications would publish events via an outbox table, which gets written to as part of business transactions in the database.

    As with the strangler pattern, we wanted to resort to an architecture component, something the teams could self-service. At first, we were exploring a home-grown solution which would look for the outbox tables among every service and publish the messages. The problem with this approach would be the database-polling problem, although in this case it is less harmful as we don’t need to look for updates or deletes.

    Luckily, by that time I was closely following the work being done in Debezium and I read the blogpost about reliable data exchange between microservices using the outbox pattern, and there was my answer! Well, I mean, parts of the answer, we still needed to implement it, and that’s a story for the next question!

    Fast forward a couple of months and we got a reliable way to exchange messages between services, with all the guarantees we wanted to have, and by applying some platform DevOps flavor to it, we also made it self-service and easy to plug in every service!

    The user can specify which database their service is at, what’s the table name, and which column to use as event router, you can find more details about it in the official Debezium outbox event router docs.

    Figure 2. The configuration file for configuring outbox connectors
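
    To give a flavour of what such a configuration can contain, here is a generic sketch based on the outbox event router documentation rather than SurveyMonkey’s actual file; the table and column names are assumptions using the defaults described in the docs:

    # capture only the outbox table of the service (table name is an assumption)
    table.include.list=public.outbox
    # route each outbox record to a topic derived from its aggregate type column
    transforms=outbox
    transforms.outbox.type=io.debezium.transforms.outbox.EventRouter
    transforms.outbox.route.by.field=aggregatetype
    # use the aggregate id column as the Kafka message key
    transforms.outbox.table.field.event.key=aggregateid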

    You’re not only using Debezium but you’ve also contributed to the project. How was your experience doing so? Are you doing other open-source work, too?

    As I spoiled at the beginning, my usage of and contributions to Debezium went hand-in-hand. In both the use cases we have for Debezium in SurveyMonkey, I had great opportunities to contribute to both Debezium and Kafka (just a bug fix, but I’m happy about it!).

    At first, I was fixing bugs in the Debezium MongoDB connector; as we really scaled it up to all the teams, a lot of edge cases started to show up, mostly in the transformation which takes the raw database transaction log and transforms it into a nicely readable Kafka Connect struct. Also due to our architecture choice, we split the raw log and transformed data into two different steps, which go in separate topics and are configured as separate Kafka Connect connectors.

    Quick sidestep: the rationale behind this decision was to be able to survive transformation errors. MongoDB has a replication window which, if you lose it, means that you are going to have to make a new full snapshot of the collection, and you might lose deletion events in this process. Because of this we opted for a safer approach, which was to split the logic of transformation from the raw logs like this: the step we call op (it stands for operation) is the Debezium MongoDB source connector and outputs the raw data into the topic without any change or transformation, minimizing the chances of errors in the process. The second step, called cdc, is a Salesforce Mirus source connector, which reads from the op output topic, transforms the message using the Debezium document flattening SMT and outputs to the final topic, which the services can consume from.

    With this approach, we now have two main abilities: we can resist errors and crashes in the native/custom transformation process like mentioned above, and we have the chance to change the transformation to our liking without having to read from the database again, giving us more flexibility. That also created some extra features and challenges to be incorporated in Debezium itself! As I kept contributing I noticed a few things that could be improved and started fixing them, including an almost full refactor of the build process of Debezium’s container images, its scripts, and other smaller things!
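
    A sketched-out version of that two-step setup, reconstructed from the description above rather than taken from the actual connector files (host names, the logical name and topic layout are assumptions), could look roughly like this:

    # step 1, "op": raw MongoDB change events, no transformation applied
    connector.class=io.debezium.connector.mongodb.MongoDbConnector
    mongodb.hosts=rs0/mongodb:27017
    mongodb.name=op

    # step 2, "cdc": applied on the topic-to-topic (Mirus) source connector
    # that replicates the op topics; the document flattening happens here instead
    transforms=unwrap
    transforms.unwrap.type=io.debezium.connector.mongodb.transforms.ExtractNewDocumentState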

    Let’s circle back to outbox; when the post about this appeared on the Debezium blog, it was mostly an idea and a proof-of-concept. But we really wanted it to run in production, so in this case, why not partner on it?

    I want to take the opportunity here to mention how helpful the Debezium community was for getting me started with contributing. As I showed the intent to work on this, they were super welcoming and we had a call about it, so I quickly felt productive working on the code base.

    Almost immediately after the conversation I started a technical draft (which you can see here) and soon thereafter, the first implementation was done. I can almost certainly say we were the first ones to run the transactional outbox pattern powered by Debezium. I was running a custom build on our platform, which then finally became the official outbox event router you see in the Debezium docs today. I was lucky to be there at the right time and with the right people, so thanks again to the Debezium team for helping me throughout the whole process of drafting and making it happen!

    Will I do more open source? Yes, but I must say most of my open source activity is "selfish": I’m developing solutions to problems I face at work, but I’m happy to take the extra step and bring them to the OSS world, which also makes it seasonal. One of the advantages of that is that if I’m doing something for a project, you can be sure I’ll take it to production and will likely find more corner cases!

    Is there anything you’re missing in Debezium or you’d like to see improved in the future?

    When I think of the Kafka and Debezium ecosystem, the next steps I consider important are the ones which will make it more accessible. Although there’s a lot of content and examples online, there’s still a big gap between reading those and getting to a production ready implementation.

    What I mean by that is abstracting the individual pieces away and giving them more meaning. The outbox pattern is a good example: it was not natural for people to think of CDC and know that it was such a good match for it, and there are plenty more use cases to be explored in this ecosystem.

    What if you could have everything out-of-the-box? An outbox implementation in your favorite framework, which knows how to integrate with the ORM, handle the transaction part, then, how to shape the messages and events? How to adopt a schema for it and what an evolution of it looks like? After that, getting closer to the consumer implementation, how can I handle the messages idempotently, respect the semantics, do retries, and project them to a database if need be? There are already initiatives like those, for instance, the Quarkus Outbox extension, which takes care of framework and database integration. The future for me has those things, for multiple frameworks and tech stacks, going even broader and helping you design good events (maybe even powered by AsyncAPI), giving everyone a kickstart!

    Those are very complex things to do in a growing architecture, the patterns will keep repeating and hopefully the community will be able to come to a consensus on designs and implementations, and that’s what I think the next step is: a place where the complexity of a good architecture doesn’t live in the wires and plugs anymore, making it more accessible!

    Bonus question: What’s the next big thing in software engineering?

    I think I handed out clues for this one in many parts of my previous answers!

    For me the next big thing is a methodology; I often say the evolution of DevOps is self-service, and it can go in many layers of the stack. The examples I gave about our Debezium implementation are what I call self-service between Platform/Ops and product development teams, but it can be applied in many, many places!

    The idea is to facilitate the implementation of complex structures, something more end-to-end, taking care of the good practices in metrics, alerts, and diverse other guaranteed semantics for the use case! We can see there’s a convergence towards that path, for instance Kubernetes operators are a great example where you can abstract one use case which will be translated to many, if not dozens of internal resources in the infrastructure.

    I believe we already have the base technology to do so, all the Infrastructure as Code, containers, frameworks, observability systems are there, we just have to give meaning to them!

    Where’s the framework where I can: Handle a user request, validate, write to the source-of-truth, produce a message to my broker, consume at another end where my only concern is the payload itself? All the semantics should be taken care of, idempotency, retries, SerDes issues, dead letter queues, eventual consistency mitigations, metrics, alerts, SLOs, SLAs, etc!

    And that’s where I put my energy every day at work, giving all the engineering teams a more fun and safe way to develop their software, which also sums up my passion for Platform Engineering!

    Renato, thanks a lot for taking your time, it was a pleasure to have you here!

    If you’d like to stay in touch with Renato Mefi and discuss with him, please drop a comment below or follow and reach out to him on Twitter.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    Towards a Graphical Debezium User Interface

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

    When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing users to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Today it’s my great pleasure to introduce you to a proof-of-concept for a potential future Debezium graphical user interface. The goal for this PoC is to explore how a graphical UI could facilitate the getting started and operational experience of Debezium users.

    The scope of the PoC is the set-up flow for configuring and instantiating a Debezium Postgres connector. The user is guided through the required configuration steps in a wizard interface, starting with mandatory information (e.g. database credentials), moving on to selecting the tables to be captured, and ending with optional settings like different data mapping options. After reviewing the final configuration, the UI will instantiate the connector in Kafka Connect.
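
    For orientation, the kind of Postgres connector configuration that such a wizard ends up submitting to Kafka Connect might look roughly like the following; this is a generic sketch, all values are placeholders and the property set is not exhaustive:

    connector.class=io.debezium.connector.postgresql.PostgresConnector
    # connection details collected in the first wizard step (placeholder values)
    database.hostname=postgres.example.com
    database.port=5432
    database.user=debezium
    database.password=secret
    database.dbname=inventory
    # logical name used as the topic prefix
    database.server.name=dbserver1
    # tables selected in the filtering step
    table.include.list=public.customers,public.orders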

    You can see a short demo of what this looks like in this video:

    We focused on some core interaction patterns, e.g. the preview functionality for selecting the captured tables. Instead of solely taking key/value pairs of configuration parameters, the UI should guide the user through the process and provide context and help, e.g. by showing the allowed options for settings in drop-downs, validating the provided settings after each step, and more.

    Now this is just the beginning; there are many more things that could be done in such a Debezium UI. E.g. in the connection configuration step, we could validate whether the given user has all the required database permissions, whether the right WAL level is set in the database, etc. There could be views for monitoring and trouble-shooting connectors. When running on Kubernetes, the UI could produce resource definitions processed by a Kafka (Connector) operator like Strimzi (instead of calling the Kafka Connect REST API), and much more.

    But before further progressing with this, we’d like to gather your feedback and opinions: Do you consider a graphical UI for Debezium useful in general, and is it something you would use in your projects? What is your feedback on the functionality currently implemented in the PoC? Which other functionality besides connector configuration would you like to see in a Debezium UI? We’ve provided a short survey with these and a few other questions:

    Before taking the questionnaire, please watch the video or run the PoC yourself (see below). Answering these questions should take just a few minutes; your participation would be very helpful for us in order to decide whether and how we should move forward with this effort.

    Trying It Out Yourself

    As everything in Debezium, the UI PoC is fully open source (Apache License Version 2.0); you can find its source code under the Debezium organization on GitHub. The PoC is implemented as a Quarkus-based web application, using React as the frontend technology.

    The Quarkus backend is configured with the URL(s) of one or more Kafka Connect clusters. Note that there’s currently no means of authentication or authorization implemented in the PoC, so don’t use it with your production Connect clusters just yet. After starting the application, you can choose the cluster to work with from the drop-down to the top right. Different from what’s shown in the video recording, the "Delete" button is working in the PoC now, too ;) There’s an example Docker Compose file, which starts up all required components for getting started quickly. Alternatively, you can obtain a pre-built container image with the Debezium UI PoC from Docker Hub. Please refer to the README file for more details on building and running the Debezium UI PoC.

    We’re looking forward very much to learning about your feedback on the Debezium UI PoC. Try it out yourself and let us know about your thoughts in the comments below and by participating in the quick survey linked above.

    A big thank you to the team working on this PoC: Ashique Ansari, Indra Shukla, June Zhang, Mark Drilling, Na Ding, and René Kerner!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Towards a Graphical Debezium User Interface

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

    When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and on its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, making it possible to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Today it’s my great pleasure to introduce you to a proof-of-concept for a potential future Debezium graphical user interface. The goal for this PoC is to explore how a graphical UI could facilitate the getting started and operational experience of Debezium users.

    The scope of the PoC is the set-up flow for configuring and instantiating a Debezium Postgres connector. The user is guided through the required configuration steps in a wizard interface, from mandatory information (e.g. database credentials), through selecting the tables to be captured, to optional settings like different data mapping options. After reviewing the final configuration, the UI will instantiate the connector in Kafka Connect.

    You can see a short demo of what this looks like in this video:

    We focused on some core interaction patterns, e.g. the preview functionality for selecting the captured tables. Instead of solely taking key/value pairs of configuration parameters, the UI should guide the user through the process and provide context and help, e.g. by showing the allowed options for settings in drop-downs, validating the provided settings after each step, and more.

    Now this is just the beginning; there are many more things that could be done in such a Debezium UI. For instance, in the connection configuration step, we could validate whether the given user has all the required database permissions, whether the right WAL level is set in the database, etc. There could be views for monitoring and troubleshooting connectors. When running on Kubernetes, the UI could produce resource definitions processed by a Kafka (Connector) operator like Strimzi (instead of calling the Kafka Connect REST API), and much more.

    But before further progressing with this, we’d like to gather your feedback and opinions: Do you consider a graphical UI for Debezium useful in general, and is it something you would use in your projects? What is your feedback on the functionality currently implemented in the PoC? Which other functionality besides connector configuration would you like to see in a Debezium UI? We’ve provided a short survey with these and a few other questions:

    Before taking the questionnaire, please watch the video or run the PoC yourself (see below). Answering these questions should take just a few minutes; your participation would be very helpful for us in order to decide whether and how we should move forward with this effort.

    Trying It Out Yourself

    Like everything in Debezium, the UI PoC is fully open source (Apache License, Version 2.0); you can find its source code under the Debezium organization on GitHub. The PoC is implemented as a Quarkus-based web application, using React as the frontend technology.

    The Quarkus backend is configured with the URL(s) of one or more Kafka Connect clusters. Note that there’s currently no means of authentication or authorization implemented in the PoC, so don’t use it with your production Connect clusters just yet. After starting the application, you can choose the cluster to work with from the drop-down at the top right. Different from what’s shown in the video recording, the "Delete" button is working in the PoC now, too ;) There’s an example Docker Compose file which starts up all the required components, so you can get going quickly. Alternatively, you can obtain a pre-built container image with the Debezium UI PoC from Docker Hub. Please refer to the README file for more details on building and running the Debezium UI PoC.

    We’re looking forward very much to learning about your feedback on the Debezium UI PoC. Try it out yourself and let us know about your thoughts in the comments below and by participating in the quick survey linked above.

    A big thank you to the team working on this PoC: Ashique Ansari, Indra Shukla, June Zhang, Mark Drilling, Na Ding, and René Kerner!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/10/23/debezium-1-4-alpha1-released/index.html b/blog/2020/10/23/debezium-1-4-alpha1-released/index.html index 9a7eb300d5..d033e036b7 100644 --- a/blog/2020/10/23/debezium-1-4-alpha1-released/index.html +++ b/blog/2020/10/23/debezium-1-4-alpha1-released/index.html @@ -1 +1 @@ - Debezium 1.4.0.Alpha1 Released

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    Vitess Connector

    Vitess is a database solution for deploying, scaling, and managing large clusters of MySQL. We are very happy that the development team around Ruslan Gibaiev and Kewei Shang of Bolt Technology OÜ decided to build a CDC solution based on Debezium and to open-source it under the Debezium umbrella. This connector is released in incubating state in Debezium 1.4.

    Ruslan and Kewei will follow up very soon with a blog post providing more details about this connector; in the meantime, please refer to the connector reference documentation to learn more.

    Fine-grained Selection of Snapshotted Tables

    One of the major focus points for Debezium 1.4 is to explore more flexible snapshot options, e.g. re-snapshotting chosen tables or parallelizing long-running snapshot operations.

    A first improvement related to snapshotting is the new connector configuration option snapshot.include.collection.list, which allows you to snapshot only a subset of the tables which the connector will capture later on during log reading. This comes in handy if, for instance, you’re interested in capturing changes to all your tables, but only need an initial snapshot of the data for some of them.
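
    For illustration, a connector registration request using this option might look roughly like the following. This is just a sketch: the connection values, names, and the choice of the MySQL connector are placeholders, and required properties (e.g. the database history settings for MySQL) are omitted, so please check the documentation of your connector version for the full set:

        {
          "name": "inventory-connector",
          "config": {
            "connector.class": "io.debezium.connector.mysql.MySqlConnector",
            "database.hostname": "mysql",
            "database.port": "3306",
            "database.user": "debezium",
            "database.password": "dbz",
            "database.server.name": "dbserver1",
            "table.include.list": "inventory.customers,inventory.orders,inventory.products",
            "snapshot.include.collection.list": "inventory.products"
          }
        }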

    For the Postgres connector, this also allows for a selective re-snapshot of specific tables by creating a custom implementation of the Snapshotter SPI contract. After restarting the connector, such a Snapshotter would continue to read the log from the point where it left off previously until "now", then take a snapshot of the given tables, and finally continue to read the log for all captured tables.
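
    If you’d like to experiment with that approach, a custom snapshotter implementation is plugged into the Postgres connector via its configuration, roughly along these lines (the class name is a made-up example, and the exact property names should be verified against the Postgres connector documentation):

        snapshot.mode=custom
        snapshot.custom.class=com.example.debezium.SelectiveResnapshotter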

    For more information on this option, please see the connector-specific documentation.

    Other Features

    Besides these key features, there are a few other features coming with the 1.4.0.Alpha1 release:

    • Implement snapshot select override behavior for MongoDB DBZ-2496

    • SqlServer - Skip processing of LSNs not associated with change table entries DBZ-2582

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Cant override environment variables DBZ-2559

    • ConcurrentModificationException during exporting data for a mongodb collection in a sharded cluster DBZ-2597

    • Mysql connector didn’t pass the default db charset to the column definition DBZ-2604

    • [Doc] "registry.redhat.io/amq7/amq-streams-kafka-25: unknown: Not Found" error occurs DBZ-2609

    • [Doc] "Error: no context directory and no Containerfile specified" error occurs DBZ-2610

    • SqlExceptions using dbz with Oracle on RDS online logs and LogMiner DBZ-2624

    • Mining session stopped - task killed/SQL operation cancelled - Oracle LogMiner DBZ-2629

    • Unparseable DDL: Using 'trigger' as table alias in view creation DBZ-2639

    • Antlr DDL parser fails to interpret BLOB([size]) DBZ-2641

    • MySQL Connector keeps stale offset metadata after snapshot.new.tables is changed DBZ-2643

    • WAL logs are not flushed in Postgres Connector DBZ-2653

    • Debezium Server Event Hubs plugin support in v1.3 DBZ-2660

    • Cassandra Connector doesn’t use log4j for logging correctly DBZ-2661

    • Should Allow NonAsciiCharacter in SQL DBZ-2670

    • MariaDB nextval function is not supported in grammar DBZ-2671

    • Sanitize field name do not sanitize sub struct field DBZ-2680

    • Debezium fails if a non-existing view with the same name as existing table is dropped DBZ-2688

    A big thank you to all the contributors from the community who worked on this release: Faizan, Sergei Morozov, Kewei Shang, Michael Wang, Arik Cohen, James Gormley, jinguangyang, Kaushik Iyer, John Martin, Travis Elnicky, Yiming Liu, and Bingqin Zhou!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.4.0.Alpha1 Released

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    Vitess Connector

    Vitess is a database solution for deploying, scaling, and managing large clusters of MySQL. We are very happy that the development team around Ruslan Gibaiev and Kewei Shang of Bolt Technology OÜ decided to build a CDC solution based on Debezium and to open-source it under the Debezium umbrella. This connector is released in incubating state in Debezium 1.4.

    Ruslan and Kewei will follow up very soon with a blog post providing more details about this connector; in the meantime, please refer to the connector reference documentation to learn more.

    Fine-grained Selection of Snapshotted Tables

    One of the major focus points for Debezium 1.4 is to explore more flexible snapshot options, e.g. re-snapshotting chosen tables or parallelizing long-running snapshot operations.

    A first improvement related to snapshotting is the new connector configuration option snapshot.include.collection.list, which allows you to snapshot only a subset of the tables which the connector will capture later on during log reading. This comes in handy if, for instance, you’re interested in capturing changes to all your tables, but only need an initial snapshot of the data for some of them.
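
    For illustration, a connector registration request using this option might look roughly like the following. This is just a sketch: the connection values, names, and the choice of the MySQL connector are placeholders, and required properties (e.g. the database history settings for MySQL) are omitted, so please check the documentation of your connector version for the full set:

        {
          "name": "inventory-connector",
          "config": {
            "connector.class": "io.debezium.connector.mysql.MySqlConnector",
            "database.hostname": "mysql",
            "database.port": "3306",
            "database.user": "debezium",
            "database.password": "dbz",
            "database.server.name": "dbserver1",
            "table.include.list": "inventory.customers,inventory.orders,inventory.products",
            "snapshot.include.collection.list": "inventory.products"
          }
        }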

    For the Postgres connector, this also allows for a selective re-snapshot of specific tables by creating a custom implementation of the Snapshotter SPI contract. After restarting the connector, such a Snapshotter would continue to read the log from the point where it left off previously until "now", then take a snapshot of the given tables, and finally continue to read the log for all captured tables.
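
    If you’d like to experiment with that approach, a custom snapshotter implementation is plugged into the Postgres connector via its configuration, roughly along these lines (the class name is a made-up example, and the exact property names should be verified against the Postgres connector documentation):

        snapshot.mode=custom
        snapshot.custom.class=com.example.debezium.SelectiveResnapshotter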

    For more information on this option, please see the connector-specific documentation.

    Other Features

    Besides these key features, there are a few other features coming with the 1.4.0.Alpha1 release:

    • Implement snapshot select override behavior for MongoDB DBZ-2496

    • SqlServer - Skip processing of LSNs not associated with change table entries DBZ-2582

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Cant override environment variables DBZ-2559

    • ConcurrentModificationException during exporting data for a mongodb collection in a sharded cluster DBZ-2597

    • Mysql connector didn’t pass the default db charset to the column definition DBZ-2604

    • [Doc] "registry.redhat.io/amq7/amq-streams-kafka-25: unknown: Not Found" error occurs DBZ-2609

    • [Doc] "Error: no context directory and no Containerfile specified" error occurs DBZ-2610

    • SqlExceptions using dbz with Oracle on RDS online logs and LogMiner DBZ-2624

    • Mining session stopped - task killed/SQL operation cancelled - Oracle LogMiner DBZ-2629

    • Unparseable DDL: Using 'trigger' as table alias in view creation DBZ-2639

    • Antlr DDL parser fails to interpret BLOB([size]) DBZ-2641

    • MySQL Connector keeps stale offset metadata after snapshot.new.tables is changed DBZ-2643

    • WAL logs are not flushed in Postgres Connector DBZ-2653

    • Debezium Server Event Hubs plugin support in v1.3 DBZ-2660

    • Cassandra Connector doesn’t use log4j for logging correctly DBZ-2661

    • Should Allow NonAsciiCharacter in SQL DBZ-2670

    • MariaDB nextval function is not supported in grammar DBZ-2671

    • Sanitize field name do not sanitize sub struct field DBZ-2680

    • Debezium fails if a non-existing view with the same name as existing table is dropped DBZ-2688

    A big thank you to all the contributors from the community who worked on this release: Faizan, Sergei Morozov, Kewei Shang, Michael Wang, Arik Cohen, James Gormley, jinguangyang, Kaushik Iyer, John Martin, Travis Elnicky, Yiming Liu, and Bingqin Zhou!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve ours existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/10/27/hello-debezium/index.html b/blog/2020/10/27/hello-debezium/index.html index 6ff5024f64..19b4d86685 100644 --- a/blog/2020/10/27/hello-debezium/index.html +++ b/blog/2020/10/27/hello-debezium/index.html @@ -1 +1 @@ - Hello Debezium!

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

    I started my journey with Red Hat in April 2020, right after graduating. I was introduced to open source in my early college days, but I wasn’t aware of how such organizations work and wanted to get a sense of open-source ethics and values. That is something I have been fascinated to learn since joining Red Hat.

    My work started under the Data Virtualization team with Teiid and then moved to the GRAPHQLCRUD project, which defines a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

    Coming to Debezium, I first heard about it when some DV team members started contributing here; back then it was a completely new thing for me. I started exploring more, and it was not long before I had my first interaction with Gunnar and Jiri. With a warm welcome and a great team here, I am really excited to work with the Debezium community.

    Can’t wait to learn and explore awesome things. Happy to get started here!

    --Anisha

    Anisha Mohanty

    Anisha is a Software Engineer at Red Hat. Currently working with the Debezium Team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Hello Debezium!

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

    I started my journey with Red Hat in April 2020, right after graduating. I was introduced to open source in my early college days, but I wasn’t aware of how such organizations work and wanted to get a sense of open-source ethics and values. That is something I have been fascinated to learn since joining Red Hat.

    My work started under the Data Virtualization team with Teiid and then moved to the GRAPHQLCRUD project, which defines a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

    Coming to Debezium, I first heard about it when some DV team members started contributing here; back then it was a completely new thing for me. I started exploring more, and it was not long before I had my first interaction with Gunnar and Jiri. With a warm welcome and a great team here, I am really excited to work with the Debezium community.

    Can’t wait to learn and explore awesome things. Happy to get started here!

    --Anisha

    Anisha Mohanty

    Anisha is a Software Engineer at Red Hat. Currently working with the Debezium Team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/11/04/streaming-vitess-at-bolt/index.html b/blog/2020/11/04/streaming-vitess-at-bolt/index.html index fe30f9ce18..c7761b41db 100644 --- a/blog/2020/11/04/streaming-vitess-at-bolt/index.html +++ b/blog/2020/11/04/streaming-vitess-at-bolt/index.html @@ -1 +1 @@ - Streaming Vitess at Bolt

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.

    Vitess at Bolt

    Bolt has grown considerably over the past few years, and so has the volume of data written to MySQL. Manual database sharding has become quite an expensive, lengthy, and error-prone process. So we started to evaluate more scalable databases, one of which is Vitess. Vitess is an open-source database clustering system that is based on MySQL and provides horizontal scalability for it. Originated and battle-tested at YouTube, it was later open-sourced and is used by companies like Slack, GitHub, and JD.com to power their backend storage. It combines important MySQL features with the scalability of a NoSQL database.

    One of the most important features that Vitess provides is its built-in sharding. It allows the database to grow horizontally by adding new shards in a way that is transparent to back-end application logic. To your application, Vitess appears like a giant single database, but in fact data is partitioned into multiple physical shards behind the scenes. For any table, an arbitrary column can be chosen as the sharding key, and all inserts and updates will be seamlessly directed to a proper shard by Vitess itself.

    Figure 1 below illustrates how back-end services interact with Vitess. At a high level, services connect to the stateless VTGate instances through a load balancer. Each VTGate has the Vitess cluster’s topology cached in its memory and redirects queries to the correct shards and the correct VTTablet (and its underlying MySQL instance) within the shards. More on VTTablet is written below.

    Figure 1. Vitess architecture. Reference: https://www.planetscale.com/vitess

    Other useful features provided by Vitess are:

    • Failover (a.k.a. Reparenting) is easy and transparent for clients. Clients only talk to a VTGate, which takes care of failover and service discovery of the new primary transparently.

    • It automatically rewrites “problematic” queries that could potentially cause database performance degradation.

    • It has a caching mechanism that prevents duplicate queries from reaching the underlying MySQL database simultaneously. Only one query will reach the database, and its result will be cached and returned to answer the duplicate queries.

    • It has its own connection pool and eliminates the high memory overhead of MySQL connections. As a result, it can easily handle thousands of connections at the same time.

    • Connection timeout and transaction timeout can be configured.

    • It has minimal downtime when doing resharding operations.

    • Its VStream feature can be used by downstream CDC applications to read change events from Vitess.

    Streaming Vitess Options

    The ability to capture data changes and publish them to Apache Kafka was one of the requirements for adopting Vitess at Bolt. There were several different options we considered.

    Option 1: Using Debezium MySQL Connector

    Applications connect to Vitess VTGate to send queries. VTGate supports the MySQL protocol and has a SQL parser. You can use any MySQL client (e.g. JDBC) to connect to VTGate, which redirects your query to the correct shard and returns the result to your client.

    However, VTGate is not a MySQL instance; rather, it is a stateless proxy to various MySQL instances. To receive change events, the Debezium MySQL connector needs to connect to a real MySQL instance. On top of that, VTGate also has some known compatibility issues, which make connecting to VTGate different from connecting to MySQL.

    Another option is to use the Debezium MySQL Connector to connect directly to the underlying MySQL instances of different shards. It has its advantages and disadvantages.

    One advantage is that for an unsharded keyspace (Vitess’s terminology for a database), the MySQL Connector can continue to work correctly and we don’t need to include additional logic or specific implementation. It should just work fine.

    One of the biggest disadvantages is that resharding operations would become more complex. For example, the GTID of the original MySQL instance would change when resharded, and the MySQL connector depends on the GTID to work correctly. We also believe that having the MySQL connector connect directly to each underlying MySQL instance defeats the purpose of Vitess’s operational simplicity, as a new connector has to be added (or removed) each time resharding is done. Not to mention that such an operation would lead to data duplication inside the Kafka brokers.

    Option 2: Using JDBC Source Connector

    We’ve also considered using the JDBC Source Connector. It allows sourcing data into Kafka from any relational database that provides a JDBC driver. Therefore, it is compatible with Vitess VTGate. It has its advantages and disadvantages as well.

    Advantages:

    • It is compatible with VTGate.

    • It handles the Vitess resharding operation better. During resharding, reads are simply redirected automatically (by VTGate) to the target shards. It won’t generate any duplicates or lose any data.

    Disadvantages:

    • It is poll-based, meaning that the connector polls the database for new change events on a defined interval (typically every few seconds). This means that we would have a much higher latency, compared to the Debezium MySQL Connector.

    • Its offsets are managed by either the table’s incremental primary key or one of the table’s timestamp columns. If we use a timestamp column for the offset, we’d have to create a secondary index on the timestamp column for each table. This adds more constraints on our backend services. If we use the incremental primary key, we would miss the change events for row updates because the primary key is simply not updated.

    • The topic name created by the JDBC connector doesn’t include the table’s schema name. Using the topic.prefix connector configuration would mean that we’ll have one connector per schema. At Bolt, we have a large number of schemas, which means we would need to create a large number of JDBC Source Connectors.

    • At Bolt, our downstream applications are already set up to use Debezium’s data formats and topic naming conventions, meaning we’d need to change our downstream applications’ decoding logic to the new data formats.

    • Row deletes are not captured.

    Option 3: Using VStream gRPC

    VTGate exposes a gRPC service called VStream. It is a server-side streaming service. Any gRPC client can subscribe to the VStream service to get a continuous stream of change events from the underlying MySQL instances. The change events that VStream emits have similar information to the MySQL binary logs of the underlying MySQL instances. A single VStream can even subscribe to multiple shards for a given keyspace, making it quite a convenient API to build CDC tools.

    Behind the scenes, as shown in Figure 2, VStream reads change events from multiple VTTablets, one VTTablet per shard. Therefore, it doesn’t send duplicates from multiple VTTablets for a given shard. Each VTTablet is a proxy to its MySQL instance. A typical topology would include one master VTTablet and its corresponding MySQL instance, and multiple replica VTTablets, each of which is the proxy of its own replica MySQL instance. A VTTablet gets change events from its underlying MySQL instance and sends them back to VTGate, which in turn sends them back to VStream’s gRPC client.

    When subscribing to the VStream service, the client can specify a VGTID and a Tablet Type (e.g. MASTER, REPLICA). The VGTID tells the position from which VStream starts to send change events. Essentially, a VGTID is a list of (keyspace, shard, shard GTID) tuples. The Tablet Type tells which MySQL instance (primary or replica) in each shard we read change events from.

    Figure 2. VStream architecture. Reference: https://vitess.io/docs/concepts/vstream
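
    To give a feel for what such a VGTID position looks like, the tuples for a keyspace with two shards can be pictured roughly as follows (keyspace, shard, and GTID values are made up for illustration; the actual wire format is defined by Vitess’s protocol buffers):

        [
          { "keyspace": "commerce", "shard": "-80", "gtid": "MySQL56/16b1039f-22b6-11ed-861d-0242ac120002:1-114" },
          { "keyspace": "commerce", "shard": "80-", "gtid": "MySQL56/3c2e51aa-22b6-11ed-861d-0242ac120002:1-98" }
        ]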

    Some advantages of using VStream gRPC are:

    • It is a simple way to receive change events from Vitess. It is also recommended in Vitess’s documentation to use VStream to build CDC processes downstream.

    • VTGate hides the complexity of connecting to various source MySQL instances.

    • It has low latency since change events are streamed to the client as soon as they happen.

    • The change events include not only inserts and updates, but also deletes.

    • Probably one of the biggest advantages is that the change events contain the schema of each table. So you don’t have to worry about fetching each table’s schema in advance (by, for example, parsing DDLs or querying the table’s definition).

    • The change events have VGTID included, which the CDC process can store and use as the offset from where to restart the CDC process next time.

    • Also importantly, VStream is designed to work well with Vitess operations such as Resharding and Moving Tables.

    There are also some disadvantages:

    • Although it includes table schemas, some important information is still missing. For example, the Enum and Set column types don’t provide all the allowed values yet. This should be fixed in the next major release (Vitess 9) though.

    • Since VStream is a gRPC service, we cannot use the Debezium MySQL Connector out-of-the-box. However, it is quite straightforward to implement the gRPC client in other languages.

    All things considered, we’ve decided to use VStream gRPC to capture change events from Vitess and implement our Vitess Connector based on all the best practices of Debezium.

    Vitess Connector Deep Dive and Open Source

    After we decided to implement our own Vitess connector, we started looking into the implementation details of various Debezium source connectors (MySQL, Postgres, SQL Server) to borrow some ideas. Almost all of them are implemented using a common connector development framework, so it was clear we should develop the Vitess connector on top of it. We are very active users of the MySQL connector and benefit from it being open source, as that allows us to contribute the things we were missing ourselves. So we decided to give back to the community and open-source the Vitess source connector code base under the Debezium umbrella. Please feel free to learn more at Debezium Connector Vitess. We welcome and value any contributions.

    At a high level, as you can see below, connector instances are created in Kafka Connect workers. At the time of writing, you have two options to configure the connector to read from Vitess:

    Option 1 (recommended):

    As shown in Figure 3, each connector captures change events from all shards in a specific keyspace. If the keyspace is not sharded, the connector can still capture change events from the only shard in the keyspace. When the connector starts for the first time, it reads from the current VGTID position of all shards in the keyspace. Because it subscribes to all shards, it continuously captures change events from all shards and sends them to Kafka. It automatically supports the Vitess Reshard operation; there is no data loss or duplication.

    Figure 3. Each connector subscribes to all shards of a specific keyspace
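
    As a rough sketch, registering a connector in this mode with Kafka Connect could look like the following. The host names, port, server name, and keyspace are placeholders, and the property names should be double-checked against the Vitess connector documentation, since the connector is still incubating:

        {
          "name": "vitess-commerce-connector",
          "config": {
            "connector.class": "io.debezium.connector.vitess.VitessConnector",
            "database.server.name": "vitess",
            "vitess.keyspace": "commerce",
            "vitess.vtgate.host": "vtgate",
            "vitess.vtgate.port": "15991"
          }
        }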

    Option 2:

    As shown in Figure 4, each connector instance captures change events from a specific keyspace/shard pair. The connector instance gets the initial (i.e. the current) VGTID position of the keyspace/shard pair from VTCtld gRPC, which is another Vitess component. Each connector instance independently uses the VGTID it gets to subscribe to VStream gRPC, continuously captures change events from VStream, and sends them to Kafka. Supporting the Vitess Reshard operation requires more manual steps in this mode.

    Figure 4. Each connector subscribes to one shard of a specific keyspace

    Internally, each connector task uses a gRPC thread to constantly receive change events from VStream and puts the events into an internal blocking queue. The connector task thread polls events out of the queue and sends them to Kafka, as can be seen in Figure 5.

    Figure 5. How each connector task works internally
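
    Conceptually, this is a classic producer/consumer hand-over between the gRPC callback thread and the Kafka Connect task thread. The following is a stripped-down sketch of that pattern, not the actual connector code; the ChangeEvent type and the queue and batch sizes are made up for illustration:

        import java.util.ArrayList;
        import java.util.List;
        import java.util.concurrent.BlockingQueue;
        import java.util.concurrent.LinkedBlockingQueue;
        import java.util.concurrent.TimeUnit;

        // Simplified illustration of the hand-over between the gRPC stream and the task thread.
        // ChangeEvent stands in for the connector's internal event representation.
        public class VStreamEventQueue {

            private final BlockingQueue<ChangeEvent> queue = new LinkedBlockingQueue<>(10_000);

            // Called by the gRPC thread for every change event received from VStream.
            public void enqueue(ChangeEvent event) throws InterruptedException {
                // Blocks if the queue is full, applying back-pressure to the stream.
                queue.put(event);
            }

            // Called by the Kafka Connect task thread from its poll() loop.
            public List<ChangeEvent> poll() throws InterruptedException {
                List<ChangeEvent> batch = new ArrayList<>();
                // Wait briefly for at least one event, then drain up to a full batch without blocking.
                ChangeEvent first = queue.poll(1, TimeUnit.SECONDS);
                if (first != null) {
                    batch.add(first);
                    queue.drainTo(batch, 999);
                }
                return batch;
            }

            // Placeholder for the connector's event type.
            public static class ChangeEvent {
            }
        }

    The bounded queue is the key design choice here: it decouples the two threads while naturally throttling the gRPC stream when Kafka cannot keep up.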

    Replication Challenges

    While implementing the Vitess connector and digging deeper into Vitess, we also came across a few challenges.

    Vitess Reshard

    The Vitess connector supports the Vitess Reshard operation when the connector is configured to subscribe to all shards of a given keyspace. VStream sends a VGTID that contains the shard GTID for all shards. Vitess resharding is transparent to users. Once it’s completed, Vitess will send the VGTID of the new shards; therefore, the connector will use the new VGTID after the reshard. However, you need to make sure that the connector is up and running when the reshard operation takes place. In particular, please check that the connector’s offset topic contains the new VGTID before deleting the old shards, because once the old shards are deleted, VStream will no longer be able to recognize a VGTID referring to them.

    If you decide to subscribe to one shard per connector, the connector does not provide out-of-the-box support for Vitess resharding. One manual workaround is to create one new connector per target shard, for example one new connector for the commerce/-80 shard and another new connector for the commerce/80- shard. Bear in mind that because they’re new connectors, new topics will be created by default; however, you could use the Debezium logical topic router to route the records to the same Kafka topics.

    Offset Management

    VStream includes a VGTID event in its response. We save the VGTID as the offset in the Kafka offset topic, so when the connector restarts, we can start from the saved VGTID. However, in rare cases when a transaction includes a huge number of rows, VStream batches the change events into multiple responses, and only the last response has the VGTID. In such cases, we don’t have the VGTID for every change event we receive. We have a few options to solve this particular issue:

    • We can buffer all the change events in memory and wait for the last response that contains the VGTID to arrive. So all events will have the correct VGTID associated with them. A few disadvantages are that we’ll have higher latency before events are sent to Kafka. Also, memory usage could potentially increase quite a lot due to buffering. Buffering also adds complexity to the logic. We also have no control over the number of events VStream sends to us.

    • We can use the latest VGTID we have, which is the VGTID from the previous VStream response. If the connector fails and restarts when processing such a big transaction, it’ll restart from the VGTID of the previous VStream response, thus reprocessing some events. Therefore, it has at-least-once event delivery semantics and it expects the downstream to be idempotent. Since most transactions are not big enough, most VStream responses will have VGTID in the response, so the chance of having duplicates is low. In the end, we chose this approach for its at-least-once delivery guarantee and its design simplicity.

    Schema Management

    VStream’s response also includes a FIELD event. It’s a special event that contains the schemas of the tables whose rows are affected. For example, let’s assume we have two tables, A and B. If we insert a few rows into table A, the FIELD event will only contain table A’s schema. VStream is smart enough to include the FIELD event only when necessary, for example when a VStream client reconnects or when a table’s schema is changed.

    Older versions of VStream include only the column type (e.g. Integer, Varchar) and no additional information such as whether the column is part of the primary key, whether it has a default value, the Decimal type’s scale and precision, the Enum type’s allowed values, etc.

    The newer version of VStream (Vitess 8) starts to include more information about each column. This will help the connector to deserialize certain types more accurately and produce a more precise schema in the change events sent to Kafka.

    Future Development Work

    • We can use VStream’s API to start streaming from the latest VGTID position, instead of getting the initial VGTID position from VTCtld gRPC. Doing so would eliminate the dependency on VTCtld.

    • We don’t support automatically extracting the primary keys from the change events yet. Currently, by default, all change events sent to Kafka have null as the key, unless the message.key.columns connector configuration is specified. Vitess recently added per-column flags to the VStream FIELD event, which will allow us to implement this feature soon.

    • Add support for initial snapshots to capture all existing data before streaming changes.

    Summary

    MySQL has been used to power most of our backend services at Bolt. Due to the considerable growth in data volume and operational complexity, Bolt started to evaluate Vitess for its scalability and its built-in features such as resharding.

    To capture data changes from Vitess, as we’ve been doing with the Debezium MySQL connector, we considered a few options. In the end, we implemented our own Vitess connector based on the common Debezium connector framework. While implementing the Vitess connector, we encountered a few challenges, for example support for the Vitess Reshard operation, offset management, and schema management. We described the ways we reasoned about these challenges and the solutions we worked out.

    We’ve also received quite a lot of interest in this project from multiple communities, and we’ve decided to open-source the Vitess connector under the Debezium umbrella. Please feel free to learn more; we welcome and value any contributions.

    Kewei Shang

    Kewei is a Senior Software Engineer at Bolt, where he focuses mainly on Kafka, Debezium, change data capture, data warehousing, and creating event-driven systems.

       

    Ruslan Gibaiev

    Ruslan is a Streaming Platform lead at Bolt. He likes building distributed systems and prefers real-time data to batch processing.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Streaming Vitess at Bolt

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.

    Vitess at Bolt

    Bolt has grown considerably over the past few years, and so has the volume of data written to MySQL. Manual database sharding has become quite an expensive, lengthy, and error-prone process. So we started to evaluate more scalable databases, one of which is Vitess. Vitess is an open-source database clustering system that is based on MySQL and provides horizontal scalability for it. Originated and battle-tested at YouTube, it was later open-sourced and is used by companies like Slack, GitHub, and JD.com to power their backend storage. It combines important MySQL features with the scalability of a NoSQL database.

    One of the most important features that Vitess provides is its built-in sharding. It allows the database to grow horizontally by adding new shards in a way that is transparent to back-end application logic. To your application, Vitess appears like a giant single database, but in fact data is partitioned into multiple physical shards behind the scenes. For any table, an arbitrary column can be chosen as the sharding key, and all inserts and updates will be seamlessly directed to a proper shard by Vitess itself.

    Figure 1 below illustrates how back-end services interact with Vitess. At a high level, services connect to the stateless VTGate instances through a load balancer. Each VTGate has the Vitess cluster’s topology cached in its memory and redirects queries to the correct shards and the correct VTTablet (and its underlying MySQL instance) within the shards. More on VTTablet is written below.

    Figure 1. Vitess architecture. Reference: https://www.planetscale.com/vitess

    Other useful features provided by Vitess are:

    • Failover (a.k.a. Reparenting) is easy and transparent for clients. Clients only talk to a VTGate, which takes care of failover and service discovery of the new primary transparently.

    • It automatically rewrites “problematic” queries that could potentially cause database performance degradation.

    • It has a caching mechanism that prevents duplicate queries from reaching the underlying MySQL database simultaneously. Only one query will reach the database, and its result will be cached and returned to answer the duplicate queries.

    • It has its own connection pool and eliminates the high memory overhead of MySQL connections. As a result, it can easily handle thousands of connections at the same time.

    • Connection timeout and transaction timeout can be configured.

    • It has minimal downtime when doing resharding operations.

    • Its VStream feature can be used by downstream CDC applications to read change events from Vitess.

    Streaming Vitess Options

    The ability to capture data changes and publish them to Apache Kafka was one of the requirements for adopting Vitess at Bolt. There were several different options we considered.

    Option 1: Using Debezium MySQL Connector

    Applications connect to Vitess VTGate to send queries. VTGate supports the MySQL protocol and has a SQL parser. You can use any MySQL client (e.g. JDBC) to connect to VTGate, which redirects your query to the correct shard and returns the result to your client.

    However, VTGate is not a MySQL instance; rather, it is a stateless proxy to various MySQL instances. To receive change events, the Debezium MySQL connector needs to connect to a real MySQL instance. On top of that, VTGate also has some known compatibility issues, which make connecting to VTGate different from connecting to MySQL.

    Another option is to use the Debezium MySQL Connector to connect directly to the underlying MySQL instances of different shards. It has its advantages and disadvantages.

    One advantage is that for an unsharded keyspace (Vitess’s terminology for a database), the MySQL Connector can continue to work correctly and we don’t need to include additional logic or specific implementation. It should just work fine.

    One of the biggest disadvantages is that resharding operations would become more complex. For example, the GTID of the original MySQL instance would change when resharded, and the MySQL connector depends on the GTID to work correctly. We also believe that having the MySQL connector connect directly to each underlying MySQL instance defeats the purpose of Vitess’s operational simplicity, as a new connector has to be added (or removed) each time resharding is done. Not to mention that such an operation would lead to data duplication inside the Kafka brokers.

    Option 2: Using JDBC Source Connector

    We’ve also considered using the JDBC Source Connector. It allows sourcing data into Kafka from any relational database that provides a JDBC driver. Therefore, it is compatible with Vitess VTGate. It has its advantages and disadvantages as well.

    Advantages:

    • It is compatible with VTGate.

    • It handles the Vitess resharding operation better. During resharding, reads are simply redirected automatically (by VTGate) to the target shards. It won’t generate any duplicates or lose any data.

    Disadvantages:

    • It is poll-based, meaning that the connector polls the database for new change events on a defined interval (typically every few seconds). This means that we would have a much higher latency, compared to the Debezium MySQL Connector.

    • Its offsets are managed by either the table’s incremental primary key or one of the table’s timestamp columns. If we use a timestamp column for the offset, we’d have to create a secondary index on the timestamp column for each table. This adds more constraints on our backend services. If we use the incremental primary key, we would miss the change events for row updates because the primary key is simply not updated.

    • The topic name created by the JDBC connector doesn’t include the table’s schema name. Using the topic.prefix connector configuration would mean that we’ll have one connector per schema. At Bolt, we have a large number of schemas, which means we would need to create a large number of JDBC Source Connectors.

    • At Bolt, our downstream applications are already set up to use Debezium’s data formats and topic naming conventions, meaning we’d need to change our downstream applications’ decoding logic to the new data formats.

    • Row deletes are not captured.

    Option 3: Using VStream gRPC

    VTGate exposes a gRPC service called VStream. It is a server-side streaming service. Any gRPC client can subscribe to the VStream service to get a continuous stream of change events from the underlying MySQL instances. The change events that VStream emits have similar information to the MySQL binary logs of the underlying MySQL instances. A single VStream can even subscribe to multiple shards for a given keyspace, making it quite a convenient API to build CDC tools.

    Behind the scenes, as shown in Figure 2, VStream reads change events from multiple VTTablets, one VTTablet per shard. Therefore, it doesn’t send duplicates from multiple VTTablets for a given shard. Each VTTablet is a proxy to its MySQL instance. A typical topology would include one master VTTablet and its corresponding MySQL instance, and multiple replica VTTablets, each of which is the proxy of its own replica MySQL instance. A VTTablet gets change events from its underlying MySQL instance and sends them back to VTGate, which in turn sends them back to VStream’s gRPC client.

    When subscribing to the VStream service, the client can specify a VGTID and a Tablet Type (e.g. MASTER, REPLICA). The VGTID tells the position from which VStream starts to send change events. Essentially, a VGTID is a list of (keyspace, shard, shard GTID) tuples. The Tablet Type tells which MySQL instance (primary or replica) in each shard we read change events from.

    Figure 2. VStream architecture. Reference: https://vitess.io/docs/concepts/vstream
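
    To give a feel for what such a VGTID position looks like, the tuples for a keyspace with two shards can be pictured roughly as follows (keyspace, shard, and GTID values are made up for illustration; the actual wire format is defined by Vitess’s protocol buffers):

        [
          { "keyspace": "commerce", "shard": "-80", "gtid": "MySQL56/16b1039f-22b6-11ed-861d-0242ac120002:1-114" },
          { "keyspace": "commerce", "shard": "80-", "gtid": "MySQL56/3c2e51aa-22b6-11ed-861d-0242ac120002:1-98" }
        ]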

    Some advantages of using VStream gRPC are:

    • It is a simple way to receive change events from Vitess. It is also recommended in Vitess’s documentation to use VStream to build CDC processes downstream.

    • VTGate hides the complexity of connecting to various source MySQL instances.

    • It has low latency since change events are streamed to the client as soon as they happen.

    • The change events include not only inserts and updates, but also deletes.

    • Probably one of the biggest advantages is that the change events contain the schema of each table. So you don’t have to worry about fetching each table’s schema in advance (by, for example, parsing DDLs or querying the table’s definition).

    • The change events have VGTID included, which the CDC process can store and use as the offset from where to restart the CDC process next time.

    • Also importantly, VStream is designed to work well with Vitess operations such as Resharding and Moving Tables.

    There are also some disadvantages:

    • Although it includes table schemas, some important information is still missing. For example, the Enum and Set column types don’t provide all the allowed values yet. This should be fixed in the next major release (Vitess 9) though.

    • Since VStream is a gRPC service, we cannot use the Debezium MySQL Connector out-of-the-box. However, it is quite straightforward to implement the gRPC client in other languages.

    All things considered, we’ve decided to use VStream gRPC to capture change events from Vitess and implement our Vitess Connector based on all the best practices of Debezium.

    Vitess Connector Deep Dive and Open Source

    After we decided to implement our own Vitess connector, we started looking into the implementation details of various Debezium source connectors (MySQL, Postgres, SQL Server) to borrow some ideas. Almost all of them are implemented using a common connector development framework, so it was clear we should develop the Vitess connector on top of it. We are very active users of the MySQL connector and benefit from it being open source, as that allows us to contribute the things we were missing ourselves. So we decided to give back to the community and open-source the Vitess source connector code base under the Debezium umbrella. Please feel free to learn more at Debezium Connector Vitess. We welcome and value any contributions.

    At a high level, as you can see below, connector instances are created in Kafka Connect workers. At the time of writing, you have two options to configure the connector to read from Vitess:

    Option 1 (recommended):

    As shown in Figure 3, each connector captures change events from all shards in a specific keyspace. If the keyspace is not sharded, the connector can still capture change events from the only shard in the keyspace. When the connector starts for the first time, it reads from the current VGTID position of all shards in the keyspace. Because it subscribes to all shards, it continuously captures change events from all shards and sends them to Kafka. It automatically supports the Vitess Reshard operation; there is no data loss or duplication.

    Figure 3. Each connector subscribes to all shards of a specific keyspace
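
    As a rough sketch, registering a connector in this mode with Kafka Connect could look like the following. The host names, port, server name, and keyspace are placeholders, and the property names should be double-checked against the Vitess connector documentation, since the connector is still incubating:

        {
          "name": "vitess-commerce-connector",
          "config": {
            "connector.class": "io.debezium.connector.vitess.VitessConnector",
            "database.server.name": "vitess",
            "vitess.keyspace": "commerce",
            "vitess.vtgate.host": "vtgate",
            "vitess.vtgate.port": "15991"
          }
        }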

    Option 2:

    As shown in Figure 4, each connector instance captures change events from a specific keyspace/shard pair. The connector instance gets the initial (i.e. the current) VGTID position of the keyspace/shard pair from VTCtld gRPC, which is another Vitess component. Each connector instance independently uses the VGTID it gets to subscribe to VStream gRPC, continuously captures change events from VStream, and sends them to Kafka. Supporting the Vitess Reshard operation requires more manual steps in this mode.

    Figure 4. Each connector subscribes to one shard of a specific keyspace

    Internally, each connector task uses a gRPC thread to constantly receive change events from VStream and puts the events into an internal blocking queue. The connector task thread polls events out of the queue and sends them to Kafka, as can be seen in Figure 5.

    Figure 5. How each connector task works internally
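
    Conceptually, this is a classic producer/consumer hand-over between the gRPC callback thread and the Kafka Connect task thread. The following is a stripped-down sketch of that pattern, not the actual connector code; the ChangeEvent type and the queue and batch sizes are made up for illustration:

        import java.util.ArrayList;
        import java.util.List;
        import java.util.concurrent.BlockingQueue;
        import java.util.concurrent.LinkedBlockingQueue;
        import java.util.concurrent.TimeUnit;

        // Simplified illustration of the hand-over between the gRPC stream and the task thread.
        // ChangeEvent stands in for the connector's internal event representation.
        public class VStreamEventQueue {

            private final BlockingQueue<ChangeEvent> queue = new LinkedBlockingQueue<>(10_000);

            // Called by the gRPC thread for every change event received from VStream.
            public void enqueue(ChangeEvent event) throws InterruptedException {
                // Blocks if the queue is full, applying back-pressure to the stream.
                queue.put(event);
            }

            // Called by the Kafka Connect task thread from its poll() loop.
            public List<ChangeEvent> poll() throws InterruptedException {
                List<ChangeEvent> batch = new ArrayList<>();
                // Wait briefly for at least one event, then drain up to a full batch without blocking.
                ChangeEvent first = queue.poll(1, TimeUnit.SECONDS);
                if (first != null) {
                    batch.add(first);
                    queue.drainTo(batch, 999);
                }
                return batch;
            }

            // Placeholder for the connector's event type.
            public static class ChangeEvent {
            }
        }

    The bounded queue is the key design choice here: it decouples the two threads while naturally throttling the gRPC stream when Kafka cannot keep up.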

    Replication Challenges

    While implementing the Vitess connector and digging deeper into Vitess, we also came across a few challenges.

    Vitess Reshard

    The Vitess connector supports the Vitess Reshard operation when the connector is configured to subscribe to all shards of a given keyspace. VStream sends a VGTID that contains the shard GTID for all shards. Vitess resharding is transparent to users. Once it’s completed, Vitess will send the VGTID of the new shards; therefore, the connector will use the new VGTID after the reshard. However, you need to make sure that the connector is up and running when the reshard operation takes place. In particular, please check that the connector’s offset topic contains the new VGTID before deleting the old shards, because once the old shards are deleted, VStream will no longer be able to recognize a VGTID referring to them.

    If you decide to subscribe to one shard per connector, the connector does not provide out-of-the-box support for Vitess resharding. One manual workaround is to create one new connector per target shard, for example one new connector for the commerce/-80 shard and another for the commerce/80- shard. Bear in mind that because they are new connectors, new topics will be created by default; however, you could use the Debezium logical topic router to route the records to the same Kafka topics, as sketched below.
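    A minimal sketch of such a re-routing configuration with the ByLogicalTableRouter SMT is shown here; the topic layout implied by the regular expression (one logical server name per connector, followed by keyspace and table) is hypothetical:

    # Sketch: merge per-shard topics such as <server>.commerce.customers into commerce.customers
    transforms=reroute
    transforms.reroute.type=io.debezium.transforms.ByLogicalTableRouter
    transforms.reroute.topic.regex=.*\.commerce\.(.*)
    transforms.reroute.topic.replacement=commerce.$1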

    Offset Management

    VStream includes a VGTID event in its response. We save the VGTID as the offset in the Kafka offset topic, so when the connector restarts, we can resume from the saved VGTID. However, in rare cases when a transaction includes a huge number of rows, VStream batches the change events into multiple responses, and only the last response has the VGTID. In such cases, we don’t have a VGTID for every change event we receive. We have a few options to solve this particular issue:

    • We can buffer all the change events in memory and wait for the last response, which contains the VGTID, to arrive, so that all events get the correct VGTID associated with them. This has a few disadvantages: latency before events are sent to Kafka increases, memory usage could grow significantly due to buffering, the buffering adds complexity to the logic, and we have no control over the number of events VStream sends to us.

    • We can use the latest VGTID we have, which is the VGTID from the previous VStream response. If the connector fails and restarts when processing such a big transaction, it’ll restart from the VGTID of the previous VStream response, thus reprocessing some events. Therefore, it has at-least-once event delivery semantics and it expects the downstream to be idempotent. Since most transactions are not big enough, most VStream responses will have VGTID in the response, so the chance of having duplicates is low. In the end, we chose this approach for its at-least-once delivery guarantee and its design simplicity.

    Schema Management

    VStream’s response also includes a FIELD event, a special event that contains the schemas of the tables whose rows are affected. For example, assume we have two tables, A and B. If we insert a few rows into table A, the FIELD event will only contain table A’s schema. VStream is smart enough to include the FIELD event only when necessary, for example when a VStream client reconnects or when a table’s schema changes.

    Older versions of VStream include only the column type (e.g. Integer, Varchar), with no additional information such as whether the column is part of the primary key, whether it has a default value, a Decimal column’s scale and precision, or an Enum column’s allowed values.

    The newer version of VStream (Vitess 8) starts to include more information about each column. This helps the connector deserialize certain types more accurately and produce a more precise schema in the change events sent to Kafka.

    Future Development Work

    • We can use VStream’s API to start streaming from the latest VGTID position, instead of getting the initial VGTID position from VTCtld gRPC. Doing so would eliminate the dependency on VTCtld.

    • We don’t support automatically extracting the primary keys from the change events yet. Currently, by default, all change events sent to Kafka have null as their key, unless the message.key.columns connector option is specified (see the example after this list). Vitess recently added per-column flags to the VStream FIELD event, which will allow us to implement this feature soon.

    • Add support for initial snapshots to capture all existing data before streaming changes.
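    Until automatic key extraction is in place, one possible workaround for the null-key behaviour mentioned above is to declare the key columns explicitly via message.key.columns; the keyspace, table, and column names below are placeholders:

    # Sketch: use the id column as the Kafka message key for the commerce.customers table
    message.key.columns=commerce.customers:id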

    Summary

    MySQL has been used to power most of our backend services at Bolt. Due to the considerable growth in data volume and operational complexity, Bolt started to evaluate Vitess for its scalability and its built-in features such as resharding.

    To capture data changes from Vitess, as we have been doing with the Debezium MySQL connector, we considered a few options and in the end implemented our own Vitess connector based on the common Debezium connector framework. While implementing it, we encountered a few challenges, such as support for the Vitess reshard operation, offset management, and schema management, and we explained how we reasoned about them and the solutions we worked out.

    We have also received a lot of interest in this project from multiple communities, and we have decided to open-source the Vitess connector under the Debezium umbrella. Please feel free to learn more; we welcome and value any contributions.

    Kewei Shang

    Kewei is a Senior Software Engineer at Bolt, where he focuses mainly on Kafka, Debezium, change data capture, data warehousing, and creating event-driven systems.

       

    Ruslan Gibaiev

    Ruslan is a Streaming Platform lead at Bolt. He likes building distributed systems and prefers real-time data to batch processing.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/11/12/debezium-1-3-1-final-released/index.html b/blog/2020/11/12/debezium-1-3-1-final-released/index.html index c72584f947..e93dea9533 100644 --- a/blog/2020/11/12/debezium-1-3-1-final-released/index.html +++ b/blog/2020/11/12/debezium-1-3-1-final-released/index.html @@ -1 +1 @@ - Debezium 1.3.1.Final Released

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    In addition, there were other bugs identified and fixed in this release, including:

    • [MongoDB] Sanitization of field names not applied to nested struct fields (DBZ-2680)

    • [MySQL] MariaDB nextval function is not supported by grammar (DBZ-2671)

    • [MSSQL] Hide stack-trace when default value cannot be parsed (DBZ-2642)

    • [MySQL] Upgrade JDBC driver to 8.0.19 (DBZ-2626)

    • [MySQL] ANTLR parser fails to interpret BLOB(size) types (DBZ-2641)

    • [MySQL] Should allow non-ascii character in SQL (DBZ-2670)

    • [MySQL] Connector fails if non-existing view with same name as table is dropped (DBZ-2688)

    • [MySQL] No viable alternative at input error when column uses aggregate function names (DBZ-2738)

    • [Oracle] No snapshot found based on specified time (DBZ-1446)

    • [PostgreSQL] WAL logs are not properly flushed (DBZ-2653)

    • [Server] Event Hubs plugin support (DBZ-2660)

    Altogether, 14 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

    A big thank you to everyone who helped test and identify these bugs. The team appreciates the invaluable feedback the community continually provides!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.3.1.Final Released

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    In addition, there were other bugs identified and fixed in this release, including:

    • [MongoDB] Sanitization of field names not applied to nested struct fields (DBZ-2680)

    • [MySQL] MariaDB nextval function is not supported by grammar (DBZ-2671)

    • [MSSQL] Hide stack-trace when default value cannot be parsed (DBZ-2642)

    • [MySQL] Upgrade JDBC driver to 8.0.19 (DBZ-2626)

    • [MySQL] ANTLR parser fails to interpret BLOB(size) types (DBZ-2641)

    • [MySQL] Should allow non-ascii character in SQL (DBZ-2670)

    • [MySQL] Connector fails if non-existing view with same name as table is dropped (DBZ-2688)

    • [MySQL] No viable alternative at input error when column uses aggregate function names (DBZ-2738)

    • [Oracle] No snapshot found based on specified time (DBZ-1446)

    • [PostgreSQL] WAL logs are not properly flushed (DBZ-2653)

    • [Server] Event Hubs plugin support (DBZ-2660)

    Altogether, 14 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

    A big thank you to everyone who helped test and identify these bugs. The team appreciates the invaluable feedback the community continually provides!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/11/17/debezium-1-4-alpha2-released/index.html b/blog/2020/11/17/debezium-1-4-alpha2-released/index.html index 76f3d103c1..729d1435ee 100644 --- a/blog/2020/11/17/debezium-1-4-alpha2-released/index.html +++ b/blog/2020/11/17/debezium-1-4-alpha2-released/index.html @@ -1,3 +1,3 @@ Debezium 1.4.0.Alpha2 Released

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    Overall, the community fixed 37 issues for this release. Let’s take a closer look at some of the highlights.

    PostgreSQL Snapshotter completion hook

    The PostgreSQL Snapshotter API is a contract that allows for the customization of the snapshot process. This API was introduced in 0.9.3.Final and has continued to evolve in the releases since.

    A new backward compatible completion hook has been added:

    void snapshotCompleted()

    This new hook is called by the snapshot process when the snapshot has concluded, allowing implementations to clean up any resources they may have allocated prior to streaming changes.
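    For context, a custom Snapshotter implementation is plugged into the PostgreSQL connector via its snapshot mode configuration; the class name below is a hypothetical example:

    # Sketch: wiring a custom Snapshotter implementation into the PostgreSQL connector
    snapshot.mode=custom
    snapshot.custom.class=com.example.CleanupAwareSnapshotter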

    ExtractNewRecordState SMT field renaming support

    One of the features of the ExtractNewRecordState SMT is that the transformation can retain parts of the original message in the transformed message’s header or payload. This release extends this feature to allow specifying a new name to be used for the field when added to the message header or payload.

    For example, to add the source database’s event timestamp to the message header using the new renaming feature, the SMT configuration would be:

    transforms=unwrap
    transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
    transforms.unwrap.add.headers=source.ts_ms:timestamp

    The format of the add.headers and add.fields configuration options has been improved to support a comma-separated list of fields with the syntax <OLD_FIELD>[:NEW_FIELD]. The headers of the message emitted with the configuration above would now contain __timestamp rather than the default __source.ts_ms field.

    This syntax improvement remains backward compatible.
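    The same renaming syntax applies to add.fields as well; for example, the following illustrative configuration would copy the operation type and the source timestamp into the flattened record, by default surfaced as __operation and __event_timestamp:

    transforms.unwrap.add.fields=op:operation,source.ts_ms:event_timestamp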

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Oracle throw "no snapshot found based on specified time" when running flashback query DBZ-1446

    • Exception when PK definition precedes column definition DBZ-2580

    • Patroni can’t stop PostgreSQL when Debezium is streaming DBZ-2617

    • ChangeRecord informations don’t connect with the TableSchema DBZ-2679

    • MySQL connector fails on a zero date DBZ-2682

    • Oracle LogMiner doesn’t support partition tables DBZ-2683

    • DB2 doesn’t start reliably in OCP DBZ-2693

    • Dropped columns cause NPE in SqlServerConnector DBZ-2716

    • Timestamp default value in 'yyyy-mm-dd' format fails MySQL connector DBZ-2726

    • Connection timeout on write should retry DBZ-2727

    • No viable alternative at input error on "min" column DBZ-2738

    • SQLServer CI error in SqlServerConnectorIT.whenCaptureInstanceExcludesColumnsAndColumnsRenamedExpectNoErrors:1473 DBZ-2747

    • debezium-connector-db2: DB2 SQL Error: SQLCODE=-206 on DB2 for z/OS DBZ-2755

    • no viable alternative at input 'alter table order drop CONSTRAINT' DBZ-2760

    • Tests are failing on macos DBZ-2762

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +transforms.unwrap.add.headers=source.ts_ms:timestamp

    The format of the add.headers and add.fields configuration options has been improved to support a comma-separated list of fields with the syntax <OLD_FIELD>[:NEW_FIELD]. The headers of the message emitted with the configuration above would now contain __timestamp rather than the default __source.ts_ms field.

    This syntax improvement remains backward compatible.

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • Oracle throw "no snapshot found based on specified time" when running flashback query DBZ-1446

    • Exception when PK definition precedes column definition DBZ-2580

    • Patroni can’t stop PostgreSQL when Debezium is streaming DBZ-2617

    • ChangeRecord informations don’t connect with the TableSchema DBZ-2679

    • MySQL connector fails on a zero date DBZ-2682

    • Oracle LogMiner doesn’t support partition tables DBZ-2683

    • DB2 doesn’t start reliably in OCP DBZ-2693

    • Dropped columns cause NPE in SqlServerConnector DBZ-2716

    • Timestamp default value in 'yyyy-mm-dd' format fails MySQL connector DBZ-2726

    • Connection timeout on write should retry DBZ-2727

    • No viable alternative at input error on "min" column DBZ-2738

    • SQLServer CI error in SqlServerConnectorIT.whenCaptureInstanceExcludesColumnsAndColumnsRenamedExpectNoErrors:1473 DBZ-2747

    • debezium-connector-db2: DB2 SQL Error: SQLCODE=-206 on DB2 for z/OS DBZ-2755

    • no viable alternative at input 'alter table order drop CONSTRAINT' DBZ-2760

    • Tests are failing on macos DBZ-2762

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/12/09/debezium-1-4-beta1-released/index.html b/blog/2020/12/09/debezium-1-4-beta1-released/index.html index dc22934aca..8d2be9462a 100644 --- a/blog/2020/12/09/debezium-1-4-beta1-released/index.html +++ b/blog/2020/12/09/debezium-1-4-beta1-released/index.html @@ -1,2 +1,2 @@ Debezium 1.4.0.Beta1 Released

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    Overall, the community fixed 39 issues for this release. Let’s take a closer look at some of the highlights.

    Distributed Tracing

    In a nutshell, distributed tracing is a pattern used to profile and monitor applications to allow quick identification of failures or performance concerns. Tracing works by having each component in a distributed process contribute a block of metadata called a "span". Each span contains unique details about that component’s unit of work. Typically a full distributed trace consists of a sequence of multiple spans.

    Distributed tracing in Debezium is enabled by using the ActivateTracingSpan SMT:

    "transforms": "tracing"
    -"transforms.tracing.type": "io.debezium.transforms.tracing.ActivateTracingSpan"

    The above configuration will lead to the emitted message header containing the tracing key/value pairs.

    A blog post discussing the distributed tracing support in depth, including end-to-end tracing for microservices data exchange via the outbox pattern, will follow up shortly.

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • DDL parser: Allow stored procedure variables in LIMIT clause DBZ-2692

    • Wrong mysql command in openshift dpeloyment docs DBZ-2746

    • long running transaction will be abandoned and ignored DBZ-2759

    • MS SQL Decimal with default value not matching the scale of the column definition cause exception DBZ-2767

    • Cassandra Connector doesn’t shut down completely DBZ-2768

    • MySQL Parser fails for BINARY collation shortcut DBZ-2771

    • PostgresConnectorIT.shouldResumeStreamingFromSlotPositionForCustomSnapshot is failing for wal2json on CI DBZ-2772

    • Connector configuration property "database.out.server.name" is not relevant for Logminer implementation but cannot be omitted DBZ-2801

    • CHARACTER VARYING mysql identifier for varchar is not supported in debezium DBZ-2821

    • try-with-resources should not be used when OkHttp Response object is returned DBZ-2827

    • EmbeddedEngine does not shutdown when commitOffsets is interrupted DBZ-2830

    • Rename user command parsing fails DBZ-2743

    A big thank you to all the contributors from the community who worked on this release: Jeremy Ford, Matt Beary, Vadzim Ramanenka, John Martin, Kewei Shang, Hoa Le, Ramesh Reddy, and Denis Andrejew.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +"transforms.tracing.type": "io.debezium.transforms.tracing.ActivateTracingSpan"

    The above configuration will lead to the emitted message header containing the tracing key/value pairs.

    A blog post discussing the distributed tracing support in depth, including end-to-end tracing for microservices data exchange via the outbox pattern, will follow up shortly.

    Bugfixes

    Also a number of bugs were fixed, e.g.:

    • DDL parser: Allow stored procedure variables in LIMIT clause DBZ-2692

    • Wrong mysql command in openshift dpeloyment docs DBZ-2746

    • long running transaction will be abandoned and ignored DBZ-2759

    • MS SQL Decimal with default value not matching the scale of the column definition cause exception DBZ-2767

    • Cassandra Connector doesn’t shut down completely DBZ-2768

    • MySQL Parser fails for BINARY collation shortcut DBZ-2771

    • PostgresConnectorIT.shouldResumeStreamingFromSlotPositionForCustomSnapshot is failing for wal2json on CI DBZ-2772

    • Connector configuration property "database.out.server.name" is not relevant for Logminer implementation but cannot be omitted DBZ-2801

    • CHARACTER VARYING mysql identifier for varchar is not supported in debezium DBZ-2821

    • try-with-resources should not be used when OkHttp Response object is returned DBZ-2827

    • EmbeddedEngine does not shutdown when commitOffsets is interrupted DBZ-2830

    • Rename user command parsing fails DBZ-2743

    A big thank you to all the contributors from the community who worked on this release: Jeremy Ford, Matt Beary, Vadzim Ramanenka, John Martin, Kewei Shang, Hoa Le, Ramesh Reddy, and Denis Andrejew.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/12/16/distributed-tracing-with-debezium/index.html b/blog/2020/12/16/distributed-tracing-with-debezium/index.html index cdc49f3539..a10c49b121 100644 --- a/blog/2020/12/16/distributed-tracing-with-debezium/index.html +++ b/blog/2020/12/16/distributed-tracing-with-debezium/index.html @@ -1,4 +1,4 @@ Distributed Tracing with Debezium

    The current pattern in application development gravitates toward microservices and microservices architecture. While this approach gives the developer teams great flexibility in terms of independent deployments and development velocity, the drawback is at hand when you try to track a bug in production.

    Monolithic applications sit nicely at a single place so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction can span across tens of services deployed in separate processes and compute nodes.

    You can rely on traditional methods like logging where you need to collect and correlate logs at a single place, so you can try to reconstruct the business transaction path. This is one of the tools in the box we can use, but it still can be crude and it will not provide all the necessary context. Distributed Tracing comes here to the rescue.

    Distributed Tracing

    Distributed tracing allows services to leave breadcrumbs during execution, with enough information to create an execution path of the business transaction enriched with contextual data like "who", "what", and "where". SRE teams and developers can then use it to browse through the recorded executions and check for errors or anomalies in execution that can signify either problems with deployments (services unavailable) or even bugs.

    And this is where Debezium becomes part of the picture. Data change events, as captured by Debezium from a database and propagated via Kafka Connect and Apache Kafka to one or more downstream consumers, are part of a data flow that is very valuable to have insight into. How long does it take for change events to flow from the source database to sink systems? Where is the most time spent in the pipeline? Are there any anomalies, like spikes in end-to-end lag? The integration of distributed tracing with Debezium can help to answer these questions.

    OpenTracing

    There are multiple solutions for distributed tracing, but as a starting point we have decided to follow and use the OpenTracing specification. OpenTracing is an incubating project of the Cloud Native Computing Foundation, which guarantees that by adhering to an open standard the user will be free of any vendor lock-in.

    The OpenTracing project is in the process of being merged with OpenCensus into the improved OpenTelemetry standard. Debezium uses OpenTracing at this point for alignment with other projects (e.g. Quarkus), but it will use and support OpenTelemetry in the future, too.

    A distributed trace in OpenTracing consists of a set of spans. Each span represents a logical unit of work executed. The spans can form a tree when a larger part of the business transaction, represented by one span, is composed of multiple tasks represented by additional spans that have a parent-child relationship to the main span.

    OpenTracing is only the specification and the instrumentation API; to use it you also need an implementation. While Debezium could be used with any OpenTracing client implementation, our examples and documentation are based on the Jaeger distributed tracing platform.

    Jaeger consists of multiple components responsible for data collection and storage, as well as a graphical user interface in the form of a web application. The Jaeger All-In-One container image will be used to simplify the deployment.

    Debezium and OpenTracing

    The Debezium integration with OpenTracing consists of three distinct components: the ActivateTracingSpan SMT, the tracing support in the outbox Quarkus extension, and the tracing support in the outbox event router SMT.

    The first one is intended for general use. The latter two must be used hand-in-hand when a (Quarkus-based) service using the outbox pattern should be traced.

    Outbox Distributed Tracing

    The biggest problem with tracing integration is keeping the trace across process boundaries, so that all the related spans are recorded in the same trace to enable end-to-end tracing. The OpenTracing specification provides a way to export and import trace-related metadata so that the trace can be passed between different processes.

    In the outbox extension we use this approach to export the metadata into a specific column of the outbox table, so that the event router SMT can then import it and resume the trace. In each of the steps executed, one or more spans are created:

    • When an event arrives at the EventDispatcher, a new span outbox-write is created. It is created as a child of the currently active span (e.g. one started by the invocation of a REST API of the current application), or as a root span if no parent span is available.

    • The span metadata is exported into a distinct field of the outbox event.

    • The outbox event is written to the outbox table.

    • The Event Router SMT receives the event and imports the span metadata from the field.

    • Two new spans are created:

      • db-log-write with its start timestamp set to the database write timestamp. The fields from the source block are added to the span as tags.

      • debezium-read with its start time set to the processing timestamp. Fields from the envelope are added to the span as tags.

    • Optionally, if OpenTracing integration is enabled at the Kafka producer level, a new span is created by the Kafka producer, representing the write of the message to a Kafka topic with relevant metadata (see the sketch after this list).
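    Producer-level tracing is typically enabled by registering the OpenTracing Kafka interceptor on the Kafka Connect worker’s producer; the line below is only a sketch of that idea and assumes the io.opentracing.contrib.kafka client library is on the worker’s classpath:

    # Sketch: enable producer-level tracing spans via the Kafka Connect worker configuration
    producer.interceptor.classes=io.opentracing.contrib.kafka.TracingProducerInterceptor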

    Demo

    The outbox example was extended with distributed tracing support to demonstrate the functionality. This example contains two rudimentary microservices: an order service which exposes a REST API for placing purchase orders, and a shipment service which is notified by the order service about new purchase orders using the outbox pattern.

    This demo uses the Strimzi container image for Kafka Connect, as it already contains baked-in integration of OpenTracing at Kafka producer level.

    To try it yourself you need to:

    • check out the repository and switch to the outbox directory

    • build the services

    $ mvn clean install
    • deploy the application

    export DEBEZIUM_VERSION=1.4
     docker-compose up --build
    • register a Debezium connector to listen on the outbox table

    $ http PUT http://localhost:8083/connectors/outbox-connector/config < register-postgres.json
     HTTP/1.1 201 Created
    • execute multiple business requests

    $ http POST http://localhost:8080/orders < resources/data/create-order-request.json
    $ http PUT http://localhost:8080/orders/1/lines/2 < resources/data/cancel-order-line-request.json

    After all the steps above have been completed, you should see the introduction screen of the Jaeger UI:

    Jaeger intro

    Filter on order-service as a service and click on Find Traces. Two traces should be available:

    Service traces

    Click on the addOrder service. A tree will open that displays how the initial request incoming via the REST API was:

    • written to the database by the outbox extension

    • read by Debezium and processed by outbox SMT

    • written to a Kafka topic

    • read from a Kafka topic by shipment-service

    • processed in the different shipment-service business methods

    Service traces

    Click on the db-log-write and debezium-read spans. The tags of each of them contain extracted Debezium-related metadata like operation or source fields:

    Service traces

    Conclusion

    In this blog post, we have discussed what distributed tracing is and why it is beneficial to use it. We have seen how the distributed tracing integration is done at the Debezium level to enable end-to-end tracing, and tried a demo application together with an exploration of the Jaeger UI.

    While this example was focused on the specific use case of microservices data exchange via the outbox pattern, Debezium integrates with distributed tracing also independently of this particular pattern. By means of the ActivateTracingSpan SMT, Debezium can produce spans representing the time of the change in the source database itself, as well as the time of processing the event by the Debezium connector.
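    As a reminder, enabling that SMT amounts to a small transform configuration on the connector, essentially the same as shown in the 1.4.0.Beta1 announcement above:

    transforms=tracing
    transforms.tracing.type=io.debezium.transforms.tracing.ActivateTracingSpan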

    Support for distributed tracing is a new feature in Debezium 1.4 (originally added in Beta1) and will evolve and mature in subsequent releases. Your feedback on this new functionality is highly welcomed!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +$ http PUT http://localhost:8080/orders/1/lines/2 < resources/data/cancel-order-line-request.json

    After all the steps above have been completed, you should see the introduction screen of the Jaeger UI:

    Jaeger intro

    Filter on order-service as a service and click on Find Traces. Two traces should be available:

    Service traces

    Click on the addOrder service. A tree will open that displays how the initial request incoming via the REST API was:

    • written to the database by the outbox extension

    • read by Debezium and processed by outbox SMT

    • written to a Kafka topic

    • read from a Kafka topic by shipment-service

    • processed in the different shipment-service business methods

    Service traces

    Click on the db-log-write and debezium-read spans. The tags of each of them contain extracted Debezium-related metadata like operation or source fields:

    Service traces

    Conclusion

    In this blog post, we have discussed what distributed tracing is and why it is beneficial to use it. We have seen how the distributed tracing integration is done at the Debezium level to enable end-to-end tracing, and tried a demo application together with an exploration of the Jaeger UI.

    While this example was focused on the specific use case of microservices data exchange via the outbox pattern, Debezium integrates with distributed tracing also independently of this particular pattern. By means of the ActivateTracingSpan SMT, Debezium can produce spans representing the time of the change in the source database itself, as well as the time of processing the event by the Debezium connector.

    Support for distributed tracing is a new feature in Debezium 1.4 (originally added in Beta1) and will evolve and mature in subsequent releases. Your feedback on this new functionality is highly welcomed!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2020/12/17/debezium-1-4-cr1-released/index.html b/blog/2020/12/17/debezium-1-4-cr1-released/index.html index 93cc56f732..629c931cf9 100644 --- a/blog/2020/12/17/debezium-1-4-cr1-released/index.html +++ b/blog/2020/12/17/debezium-1-4-cr1-released/index.html @@ -1 +1 @@ - Debezium 1.4.0.CR1 Released

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    Overall, the community fixed 15 issues for this release.

    Bugfixes

    A number of bugs were fixed, e.g.:

    • Document "database.oracle.version" option DBZ-2603

    • Move Cassandra connector to separate repository DBZ-2636

    • Remove link in MySQL docs section that points to the same section DBZ-2710

    • Invalid column name should fail connector with meaningful message DBZ-2836

    • Fix typos in downstream ModuleID declarations in monitoring.adoc DBZ-2838

    • Duplicate anchor ID in partials/ref-connector-monitoring-snapshot-metrics.adoc DBZ-2839

    • Oracle schema history events fail on partitioned table DBZ-2841

    • Fix additional typo in ModuleID declaration in monitoring.adoc DBZ-2843

    • Edit modularization annotations in logging.adoc DBZ-2846

    • outbox extension emits UPDATE events when delete is disabled DBZ-2847

    • Update Groovy version to 3.0.7 DBZ-2850

    A big thank you to all the contributors from the community who worked on this release: Kewei Shang.

    Outlook

    Barring any unforeseen regressions and bug reports, Debezium 1.4 Final should be out the first week of January. Until then, we wish everyone a safe and happy holiday season!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.4.0.CR1 Released

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    Overall, the community fixed 15 issues for this release.

    Bugfixes

    A number of bugs were fixed, e.g.:

    • Document "database.oracle.version" option DBZ-2603

    • Move Cassandra connector to separate repository DBZ-2636

    • Remove link in MySQL docs section that points to the same section DBZ-2710

    • Invalid column name should fail connector with meaningful message DBZ-2836

    • Fix typos in downstream ModuleID declarations in monitoring.adoc DBZ-2838

    • Duplicate anchor ID in partials/ref-connector-monitoring-snapshot-metrics.adoc DBZ-2839

    • Oracle schema history events fail on partitioned table DBZ-2841

    • Fix additional typo in ModuleID declaration in monitoring.adoc DBZ-2843

    • Edit modularization annotations in logging.adoc DBZ-2846

    • outbox extension emits UPDATE events when delete is disabled DBZ-2847

    • Update Groovy version to 3.0.7 DBZ-2850

    A big thank you to all the contributors from the community who worked on this release: Kewei Shang.

    Outlook

    Barring any unforeseen regressions and bug reports, Debezium 1.4 Final should be out the first week of January. Until then, we wish everyone a safe and happy holiday season!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/01/06/debezium-2020-recap/index.html b/blog/2021/01/06/debezium-2020-recap/index.html index f87c2bab98..4f93fa85e6 100644 --- a/blog/2021/01/06/debezium-2020-recap/index.html +++ b/blog/2021/01/06/debezium-2020-recap/index.html @@ -1 +1 @@ - Debezium in 2020 -- The Recap!

    A Happy New Year to the Debezium Community!

    May all your endeavours be successful, your data be consistent, and, most importantly, may everyone stay safe and healthy. With 2020 in the books, I thought it’d be nice to take a look back and do a quick recap of what has happened around Debezium over the last year.

    First, some facts and numbers for you stats lovers out there:

    • After the release of Debezium 1.0 in December 2019, we successfully released a stable Debezium version at the end of each quarter, with preview releases roughly every three weeks[1]

    • About 1,400 commits in the core repo (plus many more in the other ones), 36 blog posts and release announcements, 166 threads on the mailing list (if the query in my Google inbox is to be trusted)

    • About 100 new contributors, bringing the overall number of people contributing to the Debezium core repo to 245, plus additional people contributing to the other repositories of the Debezium GitHub organization

    • The first GA release of the commercially supported Debezium offering by Red Hat, as part of Red Hat Integration

    • Two new members on the core engineering team — the more, the merrier!

    • About 1,600 additional GitHub ⭐s for the Debezium core repo, bringing the total number of star gazers to more than 4,100

    While those figures give a nice impression of the overall activity of Debezium, they don’t really tell what has been happening exactly. What’s behind the numbers? Here are some of my personal Debezium highlights from the last year:

    The year also brought a large number of blog posts and presentations from the community about their experiences with Debezium. You can find our full list of Debezium-related resources here (please send a PR to add anything you think should be listed there). Some of the content I particularly enjoyed includes:

    It is just so amazing to see how engaged and helpful this community is. A big thank you to everyone for writing and talking about your experiences with Debezium and change data capture!

    I think 2020 has been a great year for the Debezium community, and I couldn’t be happier about all the things we’ve achieved together. Again, a huge thank you to each and every one in the community contributing to the project, be it by implementing features and bug fixes, reporting issues, engaging in discussions, answering questions on Stack Overflow, helping to spread the word in blog posts and conference talks, or otherwise!

    What’s on the roadmap for this year? It’s fair to say: "A lot" :) E.g. we’d like to rework the way snapshots are done: they should be parallelizable, updates to the include/exclude filters should be possible, and more. The Debezium UI will see substantial expansion and improvements. We’re planning to conduct systematic performance profiling and to improve identified bottlenecks. There may be official support for MariaDB, as well as an operator for running Debezium Server on Kubernetes. Plus some super-cool things I cannot talk about at this point yet :)

    Onwards and Upwards!


    1. Where is Debezium 1.4, you ask? The agile bunch we are, we adhered to the "Individuals over processes" principle and decided to move this release to later this week, due to the holiday break :)

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium in 2020 -- The Recap!

    A Happy New Year to the Debezium Community!

    May all your endeavours be successful, your data be consistent, and, most importantly, may everyone stay safe and healthy. With 2020 in the books, I thought it’d be nice to take a look back and do a quick recap of what has happened around Debezium over the last year.

    First, some facts and numbers for you stats lovers out there:

    • After the release of Debezium 1.0 in December 2019, we successfully released a stable Debezium version at the end of each quarter, with preview releases roughly every three weeks[1]

    • About 1,400 commits in the core repo (plus many more in the other ones), 36 blog posts and release announcements, 166 threads on the mailing list (if the query in my Google inbox is to be trusted)

    • About 100 new contributors, bringing the overall number of people contributing to the Debezium core repo to 245, plus additional people contributing to the other repositories of the Debezium GitHub organization

    • The first GA release of the commercially supported Debezium offering by Red Hat, as part of Red Hat Integration

    • Two new members on the core engineering team — the more, the merrier!

    • About 1,600 additional GitHub ⭐s for the Debezium core repo, bringing the total number of star gazers to more than 4,100

    While those figures give a nice impression of the overall activity of Debezium, they don’t really tell what has been happening exactly. What’s behind the numbers? Here are some of my personal Debezium highlights from the last year:

    The year also brought a large number of blog posts and presentations from the community about their experiences with Debezium. You can find our full list of Debezium-related resources here (please send a PR to add anything you think should be listed there). Some of the content I particularly enjoyed includes:

    It is just so amazing to see how engaged and helpful this community is. A big thank you to everyone for writing and talking about your experiences with Debezium and change data capture!

    I think 2020 has been a great year for the Debezium community, and I couldn’t be happier about all the things we’ve achieved together. Again, a huge thank you to each and every one in the community contributing to the project, be it by implementing features and bug fixes, reporting issues, engaging in discussions, answering questions on Stack Overflow, helping to spread the word in blog posts and conference talks, or otherwise!

    What’s on the roadmap for this year? It’s fair to say: "A lot" :) E.g. we’d like to rework the way snapshots are done: they should be parallelizable, updates to the include/exclude filters should be possible, and more. The Debezium UI will see substantial expansion and improvements. We’re planning to conduct systematic performance profiling and to improve identified bottlenecks. There may be official support for MariaDB, as well as an operator for running Debezium Server on Kubernetes. Plus some super-cool things I cannot talk about at this point yet :)

    Onwards and Upwards!


    1. Where is Debezium 1.4, you ask? The agile bunch we are, we adhered to the "Individuals over processes" principle and decided to move this release to later this week, due to the holiday break :)

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/01/07/debezium-1-4-final-released/index.html b/blog/2021/01/07/debezium-1-4-final-released/index.html index 4d134777e9..efbc2d3c7e 100644 --- a/blog/2021/01/07/debezium-1-4-final-released/index.html +++ b/blog/2021/01/07/debezium-1-4-final-released/index.html @@ -1 +1 @@ - Debezium 1.4.0.Final Released

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    Please refer to previous release announcements (Alpha1, Alpha2, Beta1, CR1) for more details. Since the CR1 release just before the holidays, we’ve focused on addressing some remaining bugs and improvements.

    Thank you to everyone involved in testing the previous releases; this is invaluable for spotting and addressing any problems with new features as well as regressions. And of course we’d like to thank all the community members contributing to this release: Alisa Houskova, Anisha Mohanty, Andrey Ignatenko, Bingqin Zhou, Babur Duisenov, Arik Cohen, Faizan, Grant Cooksey, Matt Beary, Mohamed Pudukulathan, Sergei Morozov, Kewei Shang, Mans Singh, Martin Perez, Michael Wang, Alexander Iskuskov, James Gormley, jinguangyang, Kaushik Iyer, Jeremy Ford, John Martin, Vadzim Ramanenka, Ramesh Reddy, Ganesh Ramasubramanian, Denis Andrejew, Travis Elnicky, Hoa Le, Yiming Liu, Yoann Rodière, and Peter Urbanetz.

    Overall, more than 245 individuals have contributed to the Debezium project and the number of Debezium users continues to grow. As we usher in 2021, check out our recap of Debezium in 2020.

    Outlook

    With 1.4 Final released, planning for the 1.5 version (due by the end of March) is currently underway. The roadmap is still being discussed, so be sure to let us know about your requirements and feature requests. Some of the things we’re considering for this next release are:

    • Moving the MySQL connector to the CDC connector framework shared by most other Debezium connectors; this will drastically reduce the maintenance burden of this connector in the future

    • Exploring more powerful snapshotting options (e.g. for parallelization and re-doing snapshots of selected tables)

    • Continued stability and improvements to the new LogMiner-based implementation for Oracle

    Until then, remain safe; it’s onwards and upwards from here!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.4.0.Final Released

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    Please refer to previous release announcements (Alpha1, Alpha2, Beta1, CR1) for more details. Since the CR1 release just before the holidays, we’ve focused on addressing some remaining bugs and improvements.

    Thank you to everyone involved in testing the previous releases; this is invaluable for spotting and addressing any problems with new features as well as regressions. And of course we’d like to thank all the community members contributing to this release: Alisa Houskova, Anisha Mohanty, Andrey Ignatenko, Bingqin Zhou, Babur Duisenov, Arik Cohen, Faizan, Grant Cooksey, Matt Beary, Mohamed Pudukulathan, Sergei Morozov, Kewei Shang, Mans Singh, Martin Perez, Michael Wang, Alexander Iskuskov, James Gormley, jinguangyang, Kaushik Iyer, Jeremy Ford, John Martin, Vadzim Ramanenka, Ramesh Reddy, Ganesh Ramasubramanian, Denis Andrejew, Travis Elnicky, Hoa Le, Yiming Liu, Yoann Rodière, and Peter Urbanetz.

    Overall, more than 245 individuals have contributed to the Debezium project and the number of Debezium users continues to grow. As we usher in 2021, check out our recap of Debezium in 2020.

    Outlook

    With 1.4 Final released, planning for the 1.5 version (due by the end of March) is currently underway. The roadmap is still being discussed, so be sure to let us know about your requirements and feature requests. Some of the things we’re considering for this next release are:

    • Moving the MySQL connector to the CDC connector framework shared by most other Debezium connectors; this will drastically reduce the maintenance burden of this connector in the future

    • Exploring more powerful snapshotting options (e.g. for parallelization and re-doing snapshots of selected tables)

    • Continued stability and improvements to the new LogMiner-based implementation for Oracle

    Until then, remain safe; it’s onwards and upwards from here!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/01/28/debezium-1-4-1-final-released/index.html b/blog/2021/01/28/debezium-1-4-1-final-released/index.html index 427708ba50..7a5aa74e14 100644 --- a/blog/2021/01/28/debezium-1-4-1-final-released/index.html +++ b/blog/2021/01/28/debezium-1-4-1-final-released/index.html @@ -1 +1 @@ - Debezium 1.4.1.Final Released

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    • [MySQL] - Use collation to get charset when charset is not set (DBZ-2922)

    • [MySQL] - Debezium Connectors are failing while reading binlog: Unknown event type 100 (DBZ-2499)

    • [MySQL] - Some column default values are not extracted correctly while reading table structure (DBZ-2698)

    • [MySQL] - Default database charset is not recorded (DBZ-2921)

    • [MySQL] - Labeled create procedure’s body is not parsed (DBZ-2972)

    • [Oracle] - Supplemental logging is required for entire database rather than per monitored table (DBZ-2711)

    • [Oracle] - Missing log file error when current SCN differs from snapshotted in Oracle connector and Logminer (DBZ-2855)

    • [Oracle] - DML statements longer than 4000 characters are incorrectly combined from V$LOGMNR_CONTENTS (DBZ-2920)

    • [Oracle] - Snapshot causes ORA-08181 exception (DBZ-2949)

    • [Oracle] - Deadlock in the XStream handler and offset commiter call concurrently (DBZ-2891)

    • [Oracle] - Debezium swallows DML exception in certain cases (DBZ-2981)

    • [Oracle] - Implement Scn as a domain type (DBZ-2518)

    • [PostgreSQL] - Instable test: PostgresConnectorIT#testCustomSnapshotterSnapshotCompleteLifecycleHook() (DBZ-2938)

    • [PostgreSQL] - Postgres connector config validation fails because current connector is occupying replication slot (DBZ-2952)

    • [SQL Server] - Add support for binary.handling.mode to the SQL Server connector (DBZ-2912)

    • [SQL Server] - Retry on "The server failed to resume the transaction" (DBZ-2959)

    • [Vitess] - Sanitise DECIMAL string from VStream (DBZ-2906)

    • [Vitess] - Vitess Connector download link missing on website (DBZ-2907)

    • [Dependencies] - Upgrade to Apache Kafka Connect 2.6.1 (DBZ-2630)

    Altogether, 35 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

    A big thank you to everyone who helped test and identify these bugs and contributed to this release: Ahmed Eljami, Aman Garg, Anton Kondratev, Giovanni De Stefano, Ismail Simsek, Kewei Shang, Martin Perez, Nishant Singh, Sergei Morozov, Shuguang Xiang, siufay325, and Troy Gaines!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.4.1.Final Released

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    • [MySQL] - Use collation to get charset when charset is not set (DBZ-2922)

    • [MySQL] - Debezium Connectors are failing while reading binlog: Unknown event type 100 (DBZ-2499)

    • [MySQL] - Some column default values are not extracted correctly while reading table structure (DBZ-2698)

    • [MySQL] - Default database charset is not recorded (DBZ-2921)

    • [MySQL] - Labeled create procedure’s body is not parsed (DBZ-2972)

    • [Oracle] - Supplemental logging is required for entire database rather than per monitored table (DBZ-2711)

    • [Oracle] - Missing log file error when current SCN differs from snapshotted in Oracle connector and Logminer (DBZ-2855)

    • [Oracle] - DML statements longer than 4000 characters are incorrectly combined from V$LOGMNR_CONTENTS (DBZ-2920)

    • [Oracle] - Snapshot causes ORA-08181 exception (DBZ-2949)

    • [Oracle] - Deadlock in the XStream handler and offset commiter call concurrently (DBZ-2891)

    • [Oracle] - Debezium swallows DML exception in certain cases (DBZ-2981)

    • [Oracle] - Implement Scn as a domain type (DBZ-2518)

    • [PostgreSQL] - Instable test: PostgresConnectorIT#testCustomSnapshotterSnapshotCompleteLifecycleHook() (DBZ-2938)

    • [PostgreSQL] - Postgres connector config validation fails because current connector is occupying replication slot (DBZ-2952)

    • [SQL Server] - Add support for binary.handling.mode to the SQL Server connector (DBZ-2912)

    • [SQL Server] - Retry on "The server failed to resume the transaction" (DBZ-2959)

    • [Vitess] - Sanitise DECIMAL string from VStream (DBZ-2906)

    • [Vitess] - Vitess Connector download link missing on website (DBZ-2907)

    • [Dependencies] - Upgrade to Apache Kafka Connect 2.6.1 (DBZ-2630)

    Altogether, 35 issues were resolved in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures etc.

    A big thank you to everyone who helped test and identify these bugs and contributed to this release: Ahmed Eljami, Aman Garg, Anton Kondratev, Giovanni De Stefano, Ismail Simsek, Kewei Shang, Martin Perez, Nishant Singh, Sergei Morozov, Shuguang Xiang, siufay325, and Troy Gaines!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/02/08/debezium-1-5-alpha1-released/index.html b/blog/2021/02/08/debezium-1-5-alpha1-released/index.html index 74d652f5f2..a880f39b0e 100644 --- a/blog/2021/02/08/debezium-1-5-alpha1-released/index.html +++ b/blog/2021/02/08/debezium-1-5-alpha1-released/index.html @@ -1 +1 @@ - Debezium 1.5.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    Improved LogMiner-based Capture Implementation

    Since we announced the LogMiner-based implementation for the Debezium Oracle connector in Debezium 1.3, we’ve seen constantly growing interest in this connector from folks in our lively community, who tested it out, provided feedback, logged bug reports and feature requests, submitted pull requests with fixes, and more. Based on all this input, the connector is rapidly maturing, and we aim to move the LogMiner-based implementation from "Incubating" to "Stable" state in Debezium 1.5, or 1.6 at the latest. This first Alpha release of Debezium 1.5 contains a number of related improvements:

    • java.sql.SQLException: ORA-01333: failed to establish Logminer Dictionary (DBZ-2939)

    • Capture and report LogMiner state when mining session fails to start (DBZ-3055)

    • Debezium Oracle Connector will appear stuck on large SCN jumps (DBZ-2982)

    • Improve logging for Logminer adapter (DBZ-2999)

    Many thanks to Martín Pérez, Milo van der Zee, Anton Kondratev, and all the others for their intensive testing, feedback, and contributions while working on this! One of the next steps in this area will be several performance-related improvements; stay tuned for the details.

    Reworked MySQL Connector

    In order to reduce the maintenance effort for all the different Debezium connectors, we started working towards a common connector framework a long time ago. This framework allows us to implement many features (and bug fixes) just once, and all the connectors based on this framework are able to benefit from them. By now, almost all of the Debezium connectors have been ported to this framework, with the exception of the Cassandra and MySQL connectors.

    As of this release, the MySQL connector also provides an implementation based on this framework. Since the MySQL connector was the first of the Debezium connectors and has quite a few specific characteristics and features, we have decided not to simply replace the existing implementation with a new one, but rather to keep both, existing and new, side by side for some time.

    This allows the new implementation to mature, while also giving users the choice of which implementation to use. While the new connector implementation is the default one as of this release, you can go back to the earlier one by setting the internal.implementation option to legacy. We don’t have any immediate plans for removing the existing implementation, but the focus for feature work and bug fixes will shift to the new implementation going forward. Please give the new connector implementation a try and let us know if you encounter any issues with it.
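
    For illustration, here is a minimal sketch of how that option might be set when registering the connector through Kafka Connect; the connector name is hypothetical and the usual database connection properties are omitted:

    {
      "name": "inventory-connector",
      "config": {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "internal.implementation": "legacy"
      }
    }

    Leaving the option unset keeps the new, framework-based implementation, which is the default as of this release.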

    While the new implementation is largely on par feature-wise with the earlier one, there’s one exception: the previous, experimental support for changing the filter configuration of a connector instance isn’t part of the new implementation. We’re planning to roll out a comparable feature for all the framework-based connectors in the near future. Now that there is also a framework-based implementation for the MySQL connector, we’re planning to provide a range of improvements to snapshotting for all the (relational) connectors: for instance, the aforementioned capability to change filter configurations, means of parallelizing snapshot operations, and more.

    Other Features

    Besides these key features, there’s a range of other improvements, smaller new features, and bug fixes coming with this release, including the following:

    • Correct handling of lists of user types in the Cassandra connector (DBZ-2974)

    • Multiple DDL parser fixes for MySQL and MariaDB (DBZ-3018, DBZ-3020, DBZ-3023, DBZ-3039)

    • Better snapshotting performance for large Postgres schemas with many tables (DBZ-2575)

    • Ability to emit TRUNCATE events via the Postgres connector (DBZ-2382); note that, when enabled, this adds a new op type t for this connector’s change events (a sketch of such an event follows this list), so please ensure your consumers can handle such events gracefully

    • Thanks to the work of Kewei Shang, there are now instructions for following the Debezium tutorial example using the incubating connector for Vitess (DBZ-2678), which was added in Debezium 1.4:

      Vitess Tutorial Example Overview
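
    As a rough idea of what consumers of the new TRUNCATE events will see, here is a minimal, hand-written sketch of such an event’s value payload; the field values are made up and the source block is heavily abbreviated:

    {
      "source": { "connector": "postgresql", "schema": "public", "table": "orders" },
      "op": "t",
      "ts_ms": 1612345678901
    }

    A truncation carries no row-level before/after image, so consumers should check the op field before inspecting those parts of the change event envelope.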

    Altogether, 32 issues were fixed for this release. A big thank you goes out to all the community members who contributed: Bingqin Zhou, Dave Cramer, Kewei Shang, Martín Pérez, Martin Sillence, Nick Murray, and Naveen Kumar.

    For the upcoming 1.5 preview releases, we’re planning to focus on further improving and stabilizing the LogMiner-based connector implementation for Oracle, wrap up some loose ends around the MySQL connector migration, and begin to explore the aforementioned snapshotting improvements.

    We’ve also made the decision to continue our efforts for creating a graphical Debezium user interface; this component is currently under active development, with support for more connectors, functionality for (re-)starting and stopping connectors, examining logs, and much more in the works. If things go as planned, the UI will officially be part of the next Debezium 1.5 preview release!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    Improved LogMiner-based Capture Implementation

    Since we announced the LogMiner-based implementation for the Debezium Oracle connector in Debezium 1.3, we’ve seen constantly growing interest in this connector from folks in our lively community, who tested it out, provided feedback, logged bug reports and feature requests, submitted pull requests with fixes, and more. Based on all this input, the connector is rapidly maturing, and we aim to move the LogMiner-based implementation from "Incubating" to "Stable" state in Debezium 1.5, or 1.6 at the latest. This first Alpha release of Debezium 1.5 contains a number of related improvements:

    • java.sql.SQLException: ORA-01333: failed to establish Logminer Dictionary (DBZ-2939)

    • Capture and report LogMiner state when mining session fails to start (DBZ-3055)

    • Debezium Oracle Connector will appear stuck on large SCN jumps (DBZ-2982)

    • Improve logging for Logminer adapter (DBZ-2999)

    Many thanks to Martín Pérez, Milo van der Zee, Anton Kondratev, and all the others for their intensive testing, feedback, and contributions while working on this! One of the next steps in this area will be several performance-related improvements; stay tuned for the details.

    Reworked MySQL Connector

    In order to reduce the maintenance effort for all the different Debezium connectors, we started working towards a common connector framework a long time ago. This framework allows us to implement many features (and bug fixes) just once, and all the connectors based on this framework are able to benefit from them. By now, almost all of the Debezium connectors have been ported to this framework, with the exception of the Cassandra and MySQL connectors.

    As of this release, the MySQL connector also provides an implementation based on this framework. Since the MySQL connector was the first of the Debezium connectors and has quite a few specific characteristics and features, we have decided not to simply replace the existing implementation with a new one, but rather to keep both, existing and new, side by side for some time.

    This allows the new implementation to mature, while also giving users the choice of which implementation to use. While the new connector implementation is the default one as of this release, you can go back to the earlier one by setting the internal.implementation option to legacy. We don’t have any immediate plans for removing the existing implementation, but the focus for feature work and bug fixes will shift to the new implementation going forward. Please give the new connector implementation a try and let us know if you encounter any issues with it.
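
    For illustration, here is a minimal sketch of how that option might be set when registering the connector through Kafka Connect; the connector name is hypothetical and the usual database connection properties are omitted:

    {
      "name": "inventory-connector",
      "config": {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "internal.implementation": "legacy"
      }
    }

    Leaving the option unset keeps the new, framework-based implementation, which is the default as of this release.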

    While the new implementation is largely on par feature-wise with the earlier one, there’s one exception: the previous, experimental support for changing the filter configuration of a connector instance isn’t part of the new implementation. We’re planning to roll out a comparable feature for all the framework-based connectors in the near future. Now that there is also a framework-based implementation for the MySQL connector, we’re planning to provide a range of improvements to snapshotting for all the (relational) connectors: for instance, the aforementioned capability to change filter configurations, means of parallelizing snapshot operations, and more.

    Other Features

    Besides these key features, there’s a range of other improvements, smaller new features, and bug fixes coming with this release, including the following:

    • Correct handling of lists of user types in the Cassandra connector (DBZ-2974)

    • Multiple DDL parser fixes for MySQL and MariaDB (DBZ-3018, DBZ-3020, DBZ-3023, DBZ-3039)

    • Better snapshotting performance for large Postgres schemas with many tables (DBZ-2575)

    • Ability to emit TRUNCATE events via the Postgres connector (DBZ-2382); note that, when enabled, this adds a new op type t for this connector’s change events (a sketch of such an event follows this list), so please ensure your consumers can handle such events gracefully

    • Thanks to the work of Kewei Shang, there are now instructions for following the Debezium tutorial example using the incubating connector for Vitess (DBZ-2678), which was added in Debezium 1.4:

      Vitess Tutorial Example Overview
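
    As a rough idea of what consumers of the new TRUNCATE events will see, here is a minimal, hand-written sketch of such an event’s value payload; the field values are made up and the source block is heavily abbreviated:

    {
      "source": { "connector": "postgresql", "schema": "public", "table": "orders" },
      "op": "t",
      "ts_ms": 1612345678901
    }

    A truncation carries no row-level before/after image, so consumers should check the op field before inspecting those parts of the change event envelope.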

    Altogether, 32 issues were fixed for this release. A big thank you goes out to all the community members who contributed: Bingqin Zhou, Dave Cramer, Kewei Shang, Martín Pérez, Martin Sillence, Nick Murray, and Naveen Kumar.

    For the upcoming 1.5 preview releases, we’re planning to focus on further improving and stabilizing the LogMiner-based connector implementation for Oracle, wrap up some loose ends around the MySQL connector migration, and begin to explore the aforementioned snapshotting improvements.

    We’ve also made the decision to continue our efforts for creating a graphical Debezium user interface; this component is currently under active development, with support for more connectors, functionality for (re-)starting and stopping connectors, examining logs, and much more in the works. If things go as planned, the UI will officially be part of the next Debezium 1.5 preview release!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/02/24/debezium-1-5-beta1-released/index.html b/blog/2021/02/24/debezium-1-5-beta1-released/index.html index ea703f8282..3075724695 100644 --- a/blog/2021/02/24/debezium-1-5-beta1-released/index.html +++ b/blog/2021/02/24/debezium-1-5-beta1-released/index.html @@ -1 +1 @@ - Debezium 1.5.0.Beta1 Released

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features below.

    Debezium UI

    The different Debezium connectors provide great power and flexibility for setting up and running change data capture sources for a range of databases. But this flexibility also comes at a cost: getting started with the connectors can take some time, as you need to understand all the different options and their semantics. Another critical aspect is operating the connectors, i.e. gaining insight into their current status and metrics, being able to react to connector failures, and more.

    Based on feedback from the community, we have explored over the last few months how a graphical user interface could help with these matters. The initial proof-of-concept looked very promising, so we decided to move forward and make the UI an official component of the Debezium project. While it is still under active development, you can already try out the UI today (available as a container image on Docker Hub) and use it to set up Debezium connectors in your Kafka Connect clusters.

    We’ll follow up with more details on the Debezium UI in a separate blog post within the next few days, discussing its current status, the roadmap for this component, and more.

    Improved LogMiner-based CDC Implementation

    Continuing our current focus on the LogMiner-based CDC implementation for Oracle, we’ve fixed a substantial number of issues for this connector. Amongst them are:

    • Drastically improved DML parsing performance (DBZ-3078); a new hand-written parser for the LogMiner DML statements allows for better throughput of this connector, and the existing external parser implementation will be removed very soon

    • Support for capturing changes from multiple schemas (DBZ-3009)

    • Support for column filtering (DBZ-3167)

    • Correct transaction metadata (DBZ-3090)

    • Several bug fixes related to log file switching and similar (DBZ-2754, DBZ-3001, DBZ-3153, etc.)

    Vitess Connector

    Led by community member Kewei Shang, the Debezium connector for Vitess now supports Vitess 9.0 (DBZ-3100). The connector also can capture changes from JSON and ENUM columns (DBZ-3115, DBZ-3124), and it implements the configuration validation API of Kafka Connect (DBZ-3117).

    Other Features

    Further fixes and improvements in this release include the following:

    • The Debezium MySQL connector can expose metadata about transaction boundaries (DBZ-3114); this is one of the first benefits we obtain by rebasing this connector onto the common Debezium connector framework, as discussed in the 1.5.0.Alpha1 release announcement

    • The Debezium connector for Postgres is tested and validated against PG 13 (DBZ-3022)

    • Ability to customize offsets when using the Debezium embedded API (DBZ-2897)

    • Support for CREATE OR REPLACE INDEX DDL when using the MySQL connector with MariaDB (DBZ-3067)

    • Infinite timestamp values supported with Postgres (DBZ-2614)

    Altogether, a grand total of 78 issues have been addressed for this release.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.0.Beta1 Released

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features below.

    Debezium UI

    The different Debezium connectors provide great power and flexibility for setting up and running change data capture sources for a range of databases. But this flexibility also comes at a cost: getting started with the connectors can take some time, as you need to understand all the different options and their semantics. Another critical aspect is operating the connectors, i.e. gaining insight into their current status and metrics, being able to react to connector failures, and more.

    Based on feedback from the community, we have explored over the last few months how a graphical user interface could help with these matters. The initial proof-of-concept looked very promising, so we decided to move forward and make the UI an official component of the Debezium project. While it is still under active development, you can already try out the UI today (available as a container image on Docker Hub) and use it to set up Debezium connectors in your Kafka Connect clusters.

    We’ll follow up with more details on the Debezium UI in a separate blog post within the next few days, discussing its current status, the roadmap for this component, and more.

    Improved LogMiner-based CDC Implementation

    Continuing our current focus on the LogMiner-based CDC implementation for Oracle, we’ve fixed a substantial number of issues for this connector. Amongst them are:

    • Drastically improved DML parsing performance (DBZ-3078); a new hand-written parser for the LogMiner DML statements allows for better throughput of this connector, and the existing external parser implementation will be removed very soon

    • Support for capturing changes from multiple schemas (DBZ-3009)

    • Support for column filtering (DBZ-3167)

    • Correct transaction metadata (DBZ-3090)

    • Several bug fixes related to log file switching and similar (DBZ-2754, DBZ-3001, DBZ-3153, etc.)

    Vitess Connector

    Led by community member Kewei Shang, the Debezium connector for Vitess now supports Vitess 9.0 (DBZ-3100). The connector also can capture changes from JSON and ENUM columns (DBZ-3115, DBZ-3124), and it implements the configuration validation API of Kafka Connect (DBZ-3117).

    Other Features

    Further fixes and improvements in this release include the following:

    • The Debezium MySQL connector can expose metadata about transaction boundaries (DBZ-3114); this is one of the first benefits we obtain by rebasing this connector onto the common Debezium connector framework, as discussed in the 1.5.0.Alpha1 release announcement

    • The Debezium connector for Postgres is tested and validated against PG 13 (DBZ-3022)

    • Ability to customize offsets when using the Debezium embedded API (DBZ-2897)

    • Support for CREATE OR REPLACE INDEX DDL when using the MySQL connector with MariaDB (DBZ-3067)

    • Infinite timestamp values supported with Postgres (DBZ-2614)

    Altogether, a grand total of 78 issues have been addressed for this release.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/03/15/debezium-1-5-beta2-released/index.html b/blog/2021/03/15/debezium-1-5-beta2-released/index.html index 4854b7de49..981aecd875 100644 --- a/blog/2021/03/15/debezium-1-5-beta2-released/index.html +++ b/blog/2021/03/15/debezium-1-5-beta2-released/index.html @@ -35,4 +35,4 @@ }] } }] -}

    With this JSON payload, the signal would be inserted as:

    INSERT INTO DEBEZIUM_SIGNALS (ID, TYPE, DATA) VALUES ('1', 'schema-changes', <json-payload-string>);

    Vitess SET support

    The Vitess team improved the SET data type support in the VStream API as part of Vitess 9.0. This improvement has led to the SET data type now being supported by the Debezium Vitess connector. This data type will be emitted as an EnumSet containing all the permissible values of the column’s SET definition.

    Other Features and Fixes

    Besides the Signal Table and Vitess SET support, a few other improvements and fixes found their way into this release.

    • The Debezium connector for Oracle now uses the LogMiner-based capturing implementation by default. In order to use the XStream-based implementation, the database.connection.adapter option must be explicitly set to xstream (DBZ-3241); see the configuration sketch after this list.

    • In an earlier release of Debezium 1.5, the Oracle connector began to emit NUMBER(1) data types as BOOLEAN. Rather than this conversion being done implicitly by the connector, this behavior has been moved to an out-of-the-box converter, NumberOneToBooleanConverter, which can be used as needed (DBZ-3208).

    • System generated index-organized tables (tables that begin with SYS_IOT_OVER) are ignored by the Oracle connector (DBZ-3036)

    • Debezium Server’s sink for AWS Kinesis can be configured with an endpoint by specifying debezium.sink.kinesis.endpoint (DBZ-3246).
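
    As an illustration of two of the options mentioned above, here is a minimal configuration sketch; the values are hypothetical and all other required properties are omitted. For the Oracle connector, the capturing adapter is selected in the Kafka Connect connector configuration:

    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "database.connection.adapter": "xstream"

    For Debezium Server, the Kinesis endpoint override would go into its application.properties file, e.g. pointing at a local test endpoint:

    debezium.sink.type=kinesis
    debezium.sink.kinesis.endpoint=http://localhost:4566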

    As always, you can find the complete list of all the addressed issues and upgrade procedures in the release notes.

    Many thanks to all the community members contributing to this release: Bingqin Zhou, David Seapy, Victar Malinouski, Xiao Fu, Kewei Shang, Martín Pérez, Hoa Le, Vladimir Osin, and Meng Qiu!

    What’s Next?

    Slowly wrapping up the work on the Debezium 1.5 release train, we’ve also taken the opportunity to integrate the Debezium Oracle connector into the main debezium source code repository. With that, all connectors of the former debezium-incubator repository have either been moved into their own, dedicated repository, or integrated into the main one. The incubator repository has been set to "Archived" mode, allowing its history to be examined if needed.

    For the remaining time until 1.5 Final, we’re planning to focus on bug fixes, performance improvements, documentation adjustments, and other stabilization efforts; barring any unforeseen issues, the LogMiner-based capture implementation will be promoted from Incubating to Stable state for the Final release, too. If things go as planned, there’ll be a CR (candidate release) in the middle of next week, followed by the final release around the end of the month.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    With this JSON payload, the signal would be inserted as:

    INSERT INTO DEBEZIUM_SIGNALS (ID, TYPE, DATA) VALUES ('1', 'schema-changes', <json-payload-string>);

    Vitess SET support

    The Vitess team improved the SET data type support in the VStream API as part of Vitess 9.0. This improvement has led to the SET data type now being supported by the Debezium Vitess connector. This data type will be emitted as an EnumSet containing all the permissible values of the column’s SET definition.

    Other Features and Fixes

    Besides the Signal Table and Vitess SET support, a few other improvements and fixes found their way into this release.

    • The Debezium connector for Oracle now uses the LogMiner-based capturing implementation by default. In order to use the XStream-based implementation, the database.connection.adapter option must be explicitly set to xstream (DBZ-3241); see the configuration sketch after this list.

    • In an earlier release of Debezium 1.5, the Oracle connector began to emit NUMBER(1) data types as BOOLEAN. Rather than this conversion being done implicitly by the connector, this behavior has been moved to an out-of-the-box converter, NumberOneToBooleanConverter, which can be used as needed (DBZ-3208).

    • System generated index-organized tables (tables that begin with SYS_IOT_OVER) are ignored by the Oracle connector (DBZ-3036)

    • Debezium Server’s sink for AWS Kinesis can be configured with an endpoint by specifying debezium.sink.kinesis.endpoint (DBZ-3246).
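
    As an illustration of two of the options mentioned above, here is a minimal configuration sketch; the values are hypothetical and all other required properties are omitted. For the Oracle connector, the capturing adapter is selected in the Kafka Connect connector configuration:

    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "database.connection.adapter": "xstream"

    For Debezium Server, the Kinesis endpoint override would go into its application.properties file, e.g. pointing at a local test endpoint:

    debezium.sink.type=kinesis
    debezium.sink.kinesis.endpoint=http://localhost:4566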

    As always, you can find the complete list of all the addressed issues and upgrade procedures in the release notes.

    Many thanks to all the community members contributing to this release: Bingqin Zhou, David Seapy, Victar Malinouski, Xiao Fu, Kewei Shang, Martín Pérez, Hoa Le, Vladimir Osin, and Meng Qiu!

    What’s Next?

    Slowly wrapping up the work on the Debezium 1.5 release train, we’ve also taken the opportunity to integrate the Debezium Oracle connector into the main debezium source code repository. With that, all connectors of the former debezium-incubator repository have either been moved into their own, dedicated repository, or integrated into the main one. The incubator repository has been set to "Archived" mode, allowing its history to be examined if needed.

    For the remaining time until 1.5 Final, we’re planning to focus on bug fixes, performance improvements, documentation adjustments, and other stabilization efforts; barring any unforeseen issues, the LogMiner-based capture implementation will be promoted from Incubating to Stable state for the Final release, too. If things go as planned, there’ll be a CR (candidate release) in the middle of next week, followed by the final release around the end of the month.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/03/18/understanding-non-key-joins-with-quarkus-extension-for-kafka-streams/index.html b/blog/2021/03/18/understanding-non-key-joins-with-quarkus-extension-for-kafka-streams/index.html index f894429b49..5204c44ced 100644 --- a/blog/2021/03/18/understanding-non-key-joins-with-quarkus-extension-for-kafka-streams/index.html +++ b/blog/2021/03/18/understanding-non-key-joins-with-quarkus-extension-for-kafka-streams/index.html @@ -189,4 +189,4 @@ } } ] -}

    Summary

    The Quarkus extension for Kafka Streams comes with everything needed to run stream processing pipelines on the JVM as well as in native mode, along with additional bonuses such as health checks, metrics, and more. For instance, you could quite easily expose REST APIs for interactive queries using the Quarkus REST support, potentially retrieving data from other instances of a scaled-out Kafka Streams app using the MicroProfile REST client API.

    In this article we have discussed a stream processing topology of foreign key joins in Kafka Streams, and how to use the Quarkus Kafka Streams extension for running and building your application in JVM mode. You can find the complete source code of the implementation in the Debezium examples repo. If you have any questions or feedback, please let us know in the comments below. We’re looking forward to your suggestions!

    Anisha Mohanty

    Anisha is a software engineer at Red Hat, currently working with the Debezium team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    Summary

    The Quarkus extension for Kafka Streams comes with everything needed to run stream processing pipelines on the JVM as well as in native mode, along with additional bonuses such as health checks, metrics, and more. For instance, you could quite easily expose REST APIs for interactive queries using the Quarkus REST support, potentially retrieving data from other instances of a scaled-out Kafka Streams app using the MicroProfile REST client API.

    In this article we have discussed a stream processing topology of foreign key joins in Kafka Streams, and how to use the Quarkus Kafka Streams extension for running and building your application in JVM mode. You can find the complete source code of the implementation in the Debezium examples repo. If you have any questions or feedback, please let us know in the comments below. We’re looking forward to your suggestions!

    Anisha Mohanty

    Anisha is a software engineer at Red Hat, currently working with the Debezium team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/03/24/debezium-1-5-cr1-released/index.html b/blog/2021/03/24/debezium-1-5-cr1-released/index.html index 1efdfa69db..089f264cc3 100644 --- a/blog/2021/03/24/debezium-1-5-cr1-released/index.html +++ b/blog/2021/03/24/debezium-1-5-cr1-released/index.html @@ -1 +1 @@ - Debezium 1.5.0.CR1 Released

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    Oracle connector now stable

    The Oracle connector has been in incubating status for a while, but recent efforts have helped bring new features and stability to the connector. We feel that at this point the connector is ready, so with this release we’re officially promoting the Oracle connector from incubating to stable.

    A tremendous effort by the community has made all this possible. The numerous contributions, bug reports, and testing have helped so much! The team and I cannot thank the community enough for all its insight, help, and dedication in making this milestone a reality so quickly!

    Cassandra connector TLS improvements

    The Cassandra connector uses the default available ciphers to establish SSL connections. For most use cases, this is more than satisfactory; however, it does prevent the use of non-standard ciphers. In this release, the Cassandra connector property file can be configured to specify a list of ciphers, in precedence order, for use.

    To use this new feature, add a line to the connector’s property file like below:

    cipherSuites=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384

    Bugfixes

    A number of bugs were fixed in this release, e.g.:

    • Debezium logs "is not a valid Avro schema name" can be too verbose DBZ-2511

    • message.key.columns Regex Validation Time Complexity DBZ-2957

    • OID values don’t fit to INT32 schema DBZ-3033

    • Connector automatically restart on ORA-26653 DBZ-3236

    • UI container has no assets (JS artifacts, fonts, etc) and randomly fails building DBZ-3247

    • Revert Clob behavior for Oracle LogMiner to avoid null values DBZ-3257

    • SQL Server misses description for decimal.handling.mode DBZ-3267

    • Oracle connector ignores time.precision.mode and just uses adaptive mode DBZ-3268

    • commons-logging JAR is missing from Debezium Server distro DBZ-3277

    • MongoDB timeouts crash the whole connector DBZ-3278

    • Prefer archive logs over redo logs of the same SCN range DBZ-3292

    • LogMiner mining query may unintentionally skip records DBZ-3295

    • IndexOutOfBoundsException when LogMiner DML update statement contains a function as last column’s value DBZ-3305

    • Out of memory with MySQL snapshots (regression of DBZ-94) DBZ-3309

    • Keyword ORDER is a valid identifier in MySQL grammar DBZ-3310

    • DDL statement couldn’t be parsed for ROW_FORMAT=TOKUDB_QUICKLZ DBZ-3311

    • LogMiner can miss a log switch event if too many switches occur. DBZ-3319

    • Function MOD is missing from MySQL grammar DBZ-3333

    • Incorrect SR label names in OCP testsuite DBZ-3336

    • DB2 upstream tests are still using master as the default branch DBZ-3337

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    A big thank you to all the contributors from the community who worked on this release: Frank Koornstra and Jeremy Vigny.

    Outlook

    As we begin to wrap up Debezium 1.5, and barring any unforeseen regressions or bug reports, we expect Debezium 1.5 Final to be released by the end of March. Once 1.5 Final is out, we’ll turn our focus to 1.6. We have quite a bit in store for Debezium 1.6, so stay tuned to learn what is lurking just around the corner!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.0.CR1 Released

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    Oracle connector now stable

    The Oracle connector has been in incubating status for a while, but recent efforts have brought new features and stability to the connector. We feel that the connector is now ready, so with this release we’re officially promoting the Oracle connector from incubating to stable.

    A tremendous effort by the community has made all this possible. The numerous contributions, bug reports, and testing have helped so much! The team and I cannot thank the community enough for all its insight, help, and dedication in making this milestone a reality so quickly!

    Cassandra connector TLS improvements

    The Cassandra connector uses the default available ciphers to establish SSL connections. For most use cases, this is more than satisfactory; however, it does prevent the use of non-standard ciphers. In this release, the Cassandra connector property file can be configured with a list of ciphers to use, in precedence order.

    To use this new feature, add a line to the connector’s property file like below:

    cipherSuites=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384

    Bugfixes

    A number of bugs were fixed in this release, e.g.:

    • Debezium logs "is not a valid Avro schema name" can be too verbose DBZ-2511

    • message.key.columns Regex Validation Time Complexity DBZ-2957

    • OID values don’t fit to INT32 schema DBZ-3033

    • Connector automatically restart on ORA-26653 DBZ-3236

    • UI container has no assets (JS artifacts, fonts, etc) and randomly fails building DBZ-3247

    • Revert Clob behavior for Oracle LogMiner to avoid null values DBZ-3257

    • SQL Server misses description for decimal.handling.mode DBZ-3267

    • Oracle connector ignores time.precision.mode and just uses adaptive mode DBZ-3268

    • commons-logging JAR is missing from Debezium Server distro DBZ-3277

    • MongoDB timeouts crash the whole connector DBZ-3278

    • Prefer archive logs over redo logs of the same SCN range DBZ-3292

    • LogMiner mining query may unintentionally skip records DBZ-3295

    • IndexOutOfBoundsException when LogMiner DML update statement contains a function as last column’s value DBZ-3305

    • Out of memory with MySQL snapshots (regression of DBZ-94) DBZ-3309

    • Keyword ORDER is a valid identifier in MySQL grammar DBZ-3310

    • DDL statement couldn’t be parsed for ROW_FORMAT=TOKUDB_QUICKLZ DBZ-3311

    • LogMiner can miss a log switch event if too many switches occur. DBZ-3319

    • Function MOD is missing from MySQL grammar DBZ-3333

    • Incorrect SR label names in OCP testsuite DBZ-3336

    • DB2 upstream tests are still using master as the default branch DBZ-3337

    As always, please refer to the release notes for the complete list of resolved issues as well as procedures for upgrading from earlier Debezium versions.

    A big thank you to all the contributors from the community who worked on this release: Frank Koornstra and Jeremy Vigny.

    Outlook

    As we begin to wrap up Debezium 1.5, and barring any unforeseen regressions or bug reports, we expect Debezium 1.5 Final to be released by the end of March. Once 1.5 Final is out, we’ll turn our focus to 1.6. We have quite a bit in store for Debezium 1.6, so stay tuned to learn what is lurking just around the corner!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/04/08/debezium-1-5-final-released/index.html b/blog/2021/04/08/debezium-1-5-final-released/index.html index 43533c5329..ff1472759f 100644 --- a/blog/2021/04/08/debezium-1-5-final-released/index.html +++ b/blog/2021/04/08/debezium-1-5-final-released/index.html @@ -1 +1 @@ - Debezium 1.5.0.Final Released

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    Across all the 1.5 preview releases and the final release, a grand total of 236 issues have been addressed.

    For more details, please see the earlier announcements for the 1.5.0 Alpha1, Beta1, Beta2, and CR1 releases.

    Since the CR1 release, we’ve primarily focused on documentation improvements and some bug fixes. But there are two last-minute feature additions, too:

    • Support for Redis Streams in Debezium Server (DBZ-2879), allowing Debezium data change events to be propagated into Redis-based logs

    • Provide LSN coordinates as a standardized sequence field in Postgres change events (DBZ-2911), allowing consumers to identify duplicated events and exclude them from processing, for instance after an unclean connector shutdown; this field will be added to the source block of other connectors going forward, too

    Please refer to the release notes of Debezium 1.5.0.Final for the complete list of resolved issues as well as procedures for upgrading from earlier versions.

    As always, a big thank you to all the members of the community who helped with this release, be it via code contributions, bug reports, testing, providing insight and expertise, etc. In particular for the LogMiner-based CDC implementation for Oracle, we’ve received a huge number of contributions of all kinds. We’re deeply grateful for that and look forward to further growing and improving this connector implementation! Kudos to the following individuals from the community who contributed to Debezium 1.5, bringing the overall number of contributors to the Debezium core repository to 253:

    Outlook

    Following our quarterly release cadence, Debezium 1.6 is planned for the end of June. A key item we’re planning to work on for this version is exploring how to improve initial snapshots, where we plan to touch on topics like resumability, parallelization, changes of filter configuration, and more. This is going to be an open-ended investigation, but we hope to have at least a proof-of-concept implementation for some of these features, which constantly show up high on the wish list of Debezium users.

    Another focus area will again be the Debezium connector for Oracle, where we have planned several functional and performance improvements. We are also discussing moving to Java 11 as the minimum baseline for running Debezium. This is primarily driven by external dependencies which are moving on from Java 8. In case you have specific questions or potential concerns around this change, please chime in on the discussion. Also, if you have specific feature requests or other input for the roadmap and future releases, please let us know via the mailing list!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.0.Final Released

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    Across all the 1.5 preview releases and the final release, a grand total of 236 issues have been addressed.

    For more details, please see the earlier announcements for the 1.5.0 Alpha1, Beta1, Beta2, and CR1 releases.

    Since the CR1 release, we’ve primarily focused on documentation improvements and some bug fixes. But there are two last-minute feature additions, too:

    • Support for Redis Streams in Debezium Server (DBZ-2879), allowing Debezium data change events to be propagated into Redis-based logs

    • Provide LSN coordinates as a standardized sequence field in Postgres change events (DBZ-2911), allowing consumers to identify duplicated events and exclude them from processing, for instance after an unclean connector shutdown; this field will be added to the source block of other connectors going forward, too

    Please refer to the release notes of Debezium 1.5.0.Final for the complete list of resolved issues as well as procedures for upgrading from earlier versions.

    As always, a big thank you to all the members of the community who helped with this release, be it via code contributions, bug reports, testing, providing insight and expertise, etc. In particular for the LogMiner-based CDC implementation for Oracle, we’ve received a huge number of contributions of all kinds. We’re deeply grateful for that and look forward to further growing and improving this connector implementation! Kudos to the following individuals from the community who contributed to Debezium 1.5, bringing the overall number of contributors to the Debezium core repository to 253:

    Outlook

    Following our quarterly release cadence, Debezium 1.6 is planned for the end of June. A key item we’re planning to work on for this version is exploring how to improve initial snapshots, where we plan to touch on topics like resumability, parallelization, changes of filter configuration, and more. This is going to be an open-ended investigation, but we hope to have at least a proof-of-concept implementation for some of these features, which constantly show up high on the wish list of Debezium users.

    Another focus area will again be the Debezium connector for Oracle, where we have planned several functional and performance improvements. We are also discussing moving to Java 11 as the minimum baseline for running Debezium. This is primarily driven by external dependencies which are moving on from Java 8. In case you have specific questions or potential concerns around this change, please chime in on the discussion. Also, if you have specific feature requests or other input for the roadmap and future releases, please let us know via the mailing list!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/05/06/debezium-1-6-alpha1-released/index.html b/blog/2021/05/06/debezium-1-6-alpha1-released/index.html index 54535e9b37..3b40d6daf4 100644 --- a/blog/2021/05/06/debezium-1-6-alpha1-released/index.html +++ b/blog/2021/05/06/debezium-1-6-alpha1-released/index.html @@ -1 +1 @@ - Debezium 1.6.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings a brand-new feature called incremental snapshots for the MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    Incremental Snapshotting

    Running Debezium has exhibited a few pain points:

    • the necessity to execute a consistent snapshot before streaming starts whenever a new connector is set up

    • the inability to trigger a full or partial snapshot after the connector has been running for some time

    Starting with this release, we are delivering a solution to both of these pain points.

    The simpler one, the ability to trigger a snapshot at runtime, is solved by ad-hoc snapshots. The user can trigger a snapshot at any time during the streaming phase by sending an execute-snapshot signal to Debezium with the list of tables to be snapshotted and the type of snapshot to be used (only incremental is supported right now, see below). When Debezium receives the signal, it will execute a snapshot of the requested tables.

    The more complex part that goes hand-in-hand with ad-hoc snapshotting is incremental snapshots. This feature allows the user to execute a snapshot of a set of tables during the streaming phase without interrupting the streaming. Moreover, unlike the initial snapshot, an incremental snapshot resumes after a connector restart and does not need to start from scratch again.

    The implementation of this feature is based on a novel approach to snapshotting originally introduced by the DBLog framework. The Debezium implementation is described in more detail in the design document.

    If you want to try the feature yourself, you need to:

    • provide a signalling table (a minimal sketch follows the example below)

    • trigger an ad-hoc incremental snapshot by using a SQL command like

    INSERT INTO myschema.debezium_signal VALUES('ad-hoc-1', 'execute-snapshot', '{"data-collections": ["schema1.table1", "schema1.table2"]}')
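    For reference, here is a minimal sketch of such a signalling table; the column lengths are illustrative assumptions rather than requirements, and the table should also be captured by the connector (for example by including it in its table include list):

    -- Minimal signalling table sketch; column sizes are illustrative assumptions.
    CREATE TABLE myschema.debezium_signal (
        id VARCHAR(42) PRIMARY KEY,  -- arbitrary unique identifier of the signal
        type VARCHAR(32) NOT NULL,   -- signal type, e.g. 'execute-snapshot'
        data VARCHAR(2048)           -- JSON payload, e.g. the list of data collections
    );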

    Kafka Sink for Debezium Server

    Debezium connectors can either run in Kafka Connect or be deployed via Debezium Server, which provides different destination sinks. Starting with this release, if the destination is Apache Kafka, it is no longer necessary to use Kafka Connect: Debezium Server with the Apache Kafka sink can be used instead, which may simplify operational requirements for some deployments. In this case, the regular Apache Kafka client API is used.
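    As a rough illustration, a Debezium Server configuration using this sink might look like the excerpt below; the property names follow the debezium.sink.* convention, but the producer pass-through prefix and the broker address are assumptions, so please check the Debezium Server documentation for the authoritative settings:

    # Hypothetical Debezium Server excerpt selecting the Apache Kafka sink.
    debezium.sink.type=kafka
    # Producer settings are assumed to be passed through via the producer.* prefix.
    debezium.sink.kafka.producer.bootstrap.servers=kafka:9092
    debezium.sink.kafka.producer.key.serializer=org.apache.kafka.common.serialization.StringSerializer
    debezium.sink.kafka.producer.value.serializer=org.apache.kafka.common.serialization.StringSerializer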

    Altogether, 47 issues were fixed for this release. A big thank you goes out to all the community members who contributed: Alfusainey Jallow, Bingqin Zhou, Hossein Torabi, Kyley Jex, Martín Pérez, Patrick Chu, Raphael Auv, Tommy Karlsson, WenChao Ke, and yangsanity.

    For the upcoming 1.6 preview releases, we’re planning to focus on completing the follow-up tasks for incremental snapshotting and providing support for the SQL Server and Db2 connectors too, as well as further improving the LogMiner-based connector implementation for Oracle, mainly around schema evolution and LOB support.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.6.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings a brand-new feature called incremental snapshots for the MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    Incremental Snapshotting

    Running Debezium has exhibited a few pain points:

    • the necessity to execute a consistent snapshot before streaming starts whenever a new connector is set up

    • the inability to trigger a full or partial snapshot after the connector has been running for some time

    Starting with this release, we are delivering a solution to both of these pain points.

    The simpler one, the ability to trigger a snapshot at runtime, is solved by ad-hoc snapshots. The user can trigger a snapshot at any time during the streaming phase by sending an execute-snapshot signal to Debezium with the list of tables to be snapshotted and the type of snapshot to be used (only incremental is supported right now, see below). When Debezium receives the signal, it will execute a snapshot of the requested tables.

    The more complex part that goes hand-in-hand with ad-hoc snapshotting is incremental snapshots. This feature allows the user to execute a snapshot of a set of tables during the streaming phase without interrupting the streaming. Moreover, unlike the initial snapshot, an incremental snapshot resumes after a connector restart and does not need to start from scratch again.

    The implementation of this feature is based on a novel approach to snapshotting originally introduced by the DBLog framework. The Debezium implementation is described in more detail in the design document.

    If you want to try the feature yourself, you need to:

    • provide a signalling table

    • trigger an ad-hoc incremental snapshot by using a SQL command like

    INSERT INTO myschema.debezium_signal VALUES('ad-hoc-1', 'execute-snapshot', '{"data-collections": ["schema1.table1", "schema1.table2"]}')

    Kafka Sink for Debezium Server

    Debezium connectors can either run in Kafka Connect or be deployed via Debezium Server, which provides different destination sinks. Starting with this release, if the destination is Apache Kafka, it is no longer necessary to use Kafka Connect: Debezium Server with the Apache Kafka sink can be used instead, which may simplify operational requirements for some deployments. In this case, the regular Apache Kafka client API is used.

    Altogether, 47 issues were fixed for this release. A big thank you goes out to all the community members who contributed: Alfusainey Jallow, Bingqin Zhou, Hossein Torabi, Kyley Jex, Martín Pérez, Patrick Chu, Raphael Auv, Tommy Karlsson, WenChao Ke, and yangsanity.

    For the upcoming 1.6 preview releases, we’re planning to focus on completing the follow-up tasks for incremental snapshotting and providing support for the SQL Server and Db2 connectors too, as well as further improving the LogMiner-based connector implementation for Oracle, mainly around schema evolution and LOB support.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/05/20/debezium-1-6-beta1-released/index.html b/blog/2021/05/20/debezium-1-6-beta1-released/index.html index 522327f6d5..0bc7a7f81e 100644 --- a/blog/2021/05/20/debezium-1-6-beta1-released/index.html +++ b/blog/2021/05/20/debezium-1-6-beta1-released/index.html @@ -1 +1 @@ - Debezium 1.6.0.Beta1 Released

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments to explore some of these new features below.

    Incremental Snapshotting - SQL Server / Db2

    Debezium first introduced incremental snapshotting in 1.6.0.Alpha1. As discussed in this blog post, there are several pain points that exist when running Debezium:

    • the necessity to execute consistent snapshots before streaming begins when a connector starts up

    • the inability to trigger full or even partial snapshots after the connector has been running for extended periods of time

    With this release, this feature has been extended to both the SQL Server and Db2 connectors. We intend to continue to roll this feature out to additional connectors in future releases.

    If you would like to try the feature yourself, you need to:

    • provide a signalling table

    • trigger an ad-hoc incremental snapshot by using a SQL command like

    INSERT INTO myschema.debezium_signal VALUES('ad-hoc-1', 'execute-snapshot', '{"data-collections": ["schema1.table1", "schema1.table2"]}')

    SQL Server Performance Improvement

    The SQL Server connector option, source.timestamp.mode, controls how the timestamp for an emitted event is resolved. The default commit setting is designed to resolve the timestamp based on when the change record was committed in the database. It was identified that this method used separate JDBC calls to resolve the timestamp for an event, which caused a loss in both performance and throughput.
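    For context, the option is set like any other connector property; a minimal sketch showing the default value explicitly might look like this (other required connector properties are omitted):

    # Hypothetical SQL Server connector configuration excerpt (properties form);
    # "commit" is the default mode whose performance is improved in this release.
    connector.class=io.debezium.connector.sqlserver.SqlServerConnector
    source.timestamp.mode=commit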

    This release fixes the commit mode performance problem by moving where the timestamp is resolved. This substantially increases the connector’s performance and throughput while maintaining existing functionality.

    We would like to thank Sergei Morozov for identifying and contributing a solution to this problem.

    Oracle Large Object Data Types

    In the era of "Big Data", it’s not all that uncommon to use data types such as BLOB and CLOB to store large object data. The Debezium Oracle connector has supported a wide range of data types, and we’re happy to report that we’ve now extended that support to cover both BLOB and CLOB for both the XStream and LogMiner based implementations.

    When emitting events that contain BLOB or CLOB data, the memory footprint of the connector as well as the emitted event’s message size will be directly impacted by the size of the large object data. As a result, the connector’s JVM process may require additional memory as well as adjusting some Kafka configurations, such as message.max.bytes.
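    As a hedged sketch of the kind of settings involved, the limits below would typically need to be raised together; the names are standard Kafka and Kafka Connect settings, but the values are arbitrary examples rather than recommendations:

    # Kafka broker setting (server.properties) referenced above; example value of 10 MB.
    message.max.bytes=10485760
    # Corresponding per-topic setting, if only the change event topic should be raised.
    max.message.bytes=10485760
    # Kafka Connect worker producer override that may also need a matching increase.
    producer.max.request.size=10485760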

    We encourage the community to test drive the support for these new data types and report any and all feedback.

    Other Features

    Further fixes and improvements in this release include the following:

    • The Debezium connector for Oracle now supports ALTER TABLE and DROP TABLE automatically (DBZ-2916)

    • The Debezium connector for Oracle is tested and validated using ojdbc.jar version 21.1.0.0 (DBZ-3460)

    • The Debezium connector for MongoDB could lose change events when a long-running snapshot took longer than the configured oplog window (DBZ-3331); the connector now validates the oplog position’s existence when streaming starts

    • The Debezium connector for Cassandra was not responding to schema changes correctly (DBZ-3417)

    Altogether, a total of 52 issues have been addressed for this release.

    As always, a big thank you to all the community members who contributed: Alfusainey Jallow, Bingqin Zhou, Cao Manh Dat, John Martin, John Wu, Mike, Olivier Jacquemart, Sergei Morozov, SiuFay, Stefan Miklosovic, Thomas Aregger, and Vadzim Ramanenka.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.6.0.Beta1 Released

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments to explore some of these new features below.

    Incremental Snapshotting - SQL Server / Db2

    Debezium first introduced incremental snapshotting in 1.6.0.Alpha1. As discussed in this blog post, there are several pain points that exist when running Debezium:

    • the necessity to execute consistent snapshots before streaming begins when a connector starts up

    • the inability to trigger full or even partial snapshots after the connector has been running for extended periods of time

    With this release, this feature has been extended to both the SQL Server and Db2 connectors. We intend to continue to roll this feature out to additional connectors in future releases.

    If you would like to try the feature yourself, you need to:

    • provide a signalling table

    • trigger an ad-hoc incremental snapshot by using a SQL command like

    INSERT INTO myschema.debezium_signal VALUES('ad-hoc-1', 'execute-snapshot', '{"data-collections": ["schema1.table1", "schema1.table2"]}')

    SQL Server Performance Improvement

    The SQL Server connector option, source.timestamp.mode, controls how the timestamp for an emitted event is resolved. The default commit setting is designed to resolve the timestamp based on when the change record was committed in the database. It was identified that this method used separate JDBC calls to resolve the timestamp for an event, which caused a loss in both performance and throughput.

    This release fixes the commit mode performance problem by moving where the timestamp is resolved. This substantially increases the connector’s performance and throughput while maintaining existing functionality.

    We would like to thank Sergei Morozov for identifying and contributing a solution to this problem.

    Oracle Large Object Data Types

    In the era of "Big Data", it’s not all that uncommon to use data types such as BLOB and CLOB to store large object data. The Debezium Oracle connector has supported a wide range of data types, and we’re happy to report that we’ve now extended that support to cover both BLOB and CLOB for both the XStream and LogMiner based implementations.

    When emitting events that contain BLOB or CLOB data, the memory footprint of the connector as well as the emitted event’s message size will be directly impacted by the size of the large object data. As a result, the connector’s JVM process may require additional memory as well as adjusting some Kafka configurations, such as message.max.bytes.

    We encourage the community to test drive the support for these new data types and report any and all feedback.

    Other Features

    Further fixes and improvements in this release include the following:

    • The Debezium connector for Oracle now supports ALTER TABLE and DROP TABLE automatically (DBZ-2916)

    • The Debezium connector for Oracle is tested and validated using ojdbc.jar version 21.1.0.0 (DBZ-3460)

    • The Debezium connector for MongoDB could lose change events when a long-running snapshot took longer than the configured oplog window (DBZ-3331); the connector now validates the oplog position’s existence when streaming starts

    • The Debezium connector for Cassandra was not responding to schema changes correctly (DBZ-3417)

    Altogether, a total of 52 issues have been addressed for this release.

    As always, a big thank you to all the community members who contributed: Alfusainey Jallow, Bingqin Zhou, Cao Manh Dat, John Martin, John Wu, Mike, Olivier Jacquemart, Sergei Morozov, SiuFay, Stefan Miklosovic, Thomas Aregger, and Vadzim Ramanenka.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/05/27/debezium-1-5-1-final-released/index.html b/blog/2021/05/27/debezium-1-5-1-final-released/index.html index 2686e1a7f8..cc7dd060fc 100644 --- a/blog/2021/05/27/debezium-1-5-1-final-released/index.html +++ b/blog/2021/05/27/debezium-1-5-1-final-released/index.html @@ -1 +1 @@ - Debezium 1.5.1.Final Released

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    The two most important fixes in this release are related to the MySQL database history that can get potentially corrupted under an unfavorable set of conditions.

    • If you execute a DROP TABLE command and the affected table’s name contains dashes, then the resulting statement cannot be parsed (DBZ-3485)

    • RENAME TABLE statements that contain more than one table can be stored incompletely (DBZ-3399).

    Both issues were introduced during the rewrite of the MySQL connector and were not covered by integration tests.

    We strongly recommend upgrading to 1.5.1.Final before you hit these issues. If you are already affected, then the easiest way to recover from the error situation is to use a new topic (or drop the old one) for the database history and execute a schema_only_recovery snapshot. We apologize for the potential inconvenience.
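    As a rough sketch of what that recovery could look like for the MySQL connector, the settings below point the database history at a fresh topic and request a recovery snapshot; the topic name is a placeholder to adjust for your environment:

    # Hypothetical recovery settings for an affected MySQL connector.
    database.history.kafka.topic=dbhistory.inventory.recovered
    snapshot.mode=schema_only_recovery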

    Overall, 31 issues were fixed for this release. Thanks a lot to all contributors!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.1.Final Released

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    The two most important fixes in this release are related to the MySQL database history that can get potentially corrupted under an unfavorable set of conditions.

    • If you execute a DROP TABLE command and the affected table’s name contains dashes, then the resulting statement cannot be parsed (DBZ-3485)

    • RENAME TABLE statements that contain more than one table can be stored incompletely (DBZ-3399).

    Both issues were introduced during the rewrite of the MySQL connector and were not covered by integration tests.

    We strongly recommend upgrading to 1.5.1.Final before you hit these issues. If you are already affected, then the easiest way to recover from the error situation is to use a new topic (or drop the old one) for the database history and execute a schema_only_recovery snapshot. We apologize for the potential inconvenience.

    Overall, 31 issues were fixed for this release. Thanks a lot to all contributors!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/05/28/debezium-1-5-2-final-released/index.html b/blog/2021/05/28/debezium-1-5-2-final-released/index.html index e941e55c9b..2f1b199c3a 100644 --- a/blog/2021/05/28/debezium-1-5-2-final-released/index.html +++ b/blog/2021/05/28/debezium-1-5-2-final-released/index.html @@ -1 +1 @@ - Debezium 1.5.2.Final Released

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Debezium 1.5.1.Final was incorrectly built using Java 11, which would prevent it from running in environments still using Java 8. This version has been rebuilt using Java 8.

    Overall, 2 issues were fixed for this release. Thanks a lot to all contributors!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.5.2.Final Released

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Debezium 1.5.1.Final was incorrectly built using Java 11, which would prevent it from running in environments still using Java 8. This version has been rebuilt using Java 8.

    Overall, 2 issues were fixed for this release. Thanks a lot to all contributors!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/06/10/debezium-1-6-beta2-released/index.html b/blog/2021/06/10/debezium-1-6-beta2-released/index.html index e7ba333a08..2881b1041a 100644 --- a/blog/2021/06/10/debezium-1-6-beta2-released/index.html +++ b/blog/2021/06/10/debezium-1-6-beta2-released/index.html @@ -1 +1 @@ - Debezium 1.6.0.Beta2 Released

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Pravega Support for Debezium Server

    With Debezium Server, we’re on a mission to bring open-source change data capture to all users, no matter which data streaming platform or commit log they are using. So we are very happy to have received a contribution which adds support for Pravega to Debezium Server. A Cloud Native Computing Foundation (CNCF) sandbox and Apache 2.0 licensed open-source project, Pravega describes itself as a "storage abstraction for continuously generated and unbounded data".

    The Debezium Server Pravega sink adapter offers two modes: non-transactional and transactional. The non-transactional mode individually writes each event in a Debezium batch to Pravega. The transactional mode writes the Debezium batch to a Pravega transaction that commits when the batch is completed.
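    Purely as an illustration, a Debezium Server configuration selecting this sink would presumably look something like the following; beyond debezium.sink.type, the property names and the controller URI are assumptions, so please consult the Pravega sink documentation for the authoritative settings:

    # Hypothetical Debezium Server excerpt for the Pravega sink.
    debezium.sink.type=pravega
    # Assumed property names for the controller URI and the transactional mode.
    debezium.sink.pravega.uri=tcp://pravega-controller:9090
    debezium.sink.pravega.transaction=true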

    To learn more about using Debezium with Pravega, please refer to the documentation. Many thanks to Derek Moore for this fantastic contribution!

    Oracle Snapshotting Improvements

    The Debezium connector for Oracle received two improvements related to snapshotting:

    • Support for the snapshot.include.collection.list option (DBZ-3062); this allows creating an initial snapshot for only a subset of the tables captured by the connector

    • A new option, snapshot.locking.mode, which provides control over the locking behavior when the connector captures the schema of the tables (DBZ-3557); in particular, this allows disabling locking completely, which is very useful if you can guarantee that no DDL changes happen while the connector is taking the (schema) snapshot (see the sketch below)
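    A hedged sketch of how these two options might be combined in an Oracle connector configuration; the table names are placeholders, and the value none for the locking mode is assumed here to be the setting that disables locking entirely:

    # Hypothetical Oracle connector configuration excerpt (properties form).
    connector.class=io.debezium.connector.oracle.OracleConnector
    # Snapshot only a subset of the captured tables...
    snapshot.include.collection.list=INVENTORY.CUSTOMERS,INVENTORY.ORDERS
    # ...and skip schema locking entirely, assuming no concurrent DDL changes.
    snapshot.locking.mode=none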

    In addition, there’s several bug fixes for this connector, including a few ones related to DDL and DML parsing (DBZ-3545, DBZ-3549, DBZ-3554, DBZ-3606), handling of RAC installations (DBZ-3563, DBZ-3599), and more efficient handling of LOB columns (DBZ-3556).

    Further Improvements and Bugfixes

    The Debezium connector for SQL Server saw two performance-related improvements (DBZ-3486, DBZ-3515). The schemas of change events from the Postgres connector now contain default values, based on the source column definition (DBZ-2790). This comes in handy, for instance, when deriving downstream table schemas from a change event stream.

    Other fixes include correct identification of primary members in MongoDB replica sets (DBZ-3522), support for the JSON function in the MySQL connector’s DDL parser (DBZ-3559), and the upgrade of the Debezium Quarkus extension for implementing the outbox pattern to Quarkus 2.0 (DBZ-3550).

    Overall, 48 issues have been addressed in Debezium 1.6.0.Beta2. We’re deeply grateful to all the community members contributing to this release:

    With Beta2 out the door, we’re entering the stabilization phase for the 1.6 release cycle. You can expect one or two CRs (candidate releases) before the final release, which is planned for the end of the month, barring any unforeseen complications of course. Besides some more bug fixes and documentation improvements, we’re also intending to upgrade to Apache Kafka 2.8, which will allow you to take a sneak peek at using Debezium with ZooKeeper-less Kafka!

    In parallel, we’re going to work on the roadmap for Debezium 1.7 (due by the end of September). Please get in touch via the mailing list if you have specific feature requests for this release!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.6.0.Beta2 Released

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Pravega Support for Debezium Server

    With Debezium Server, we’re on a mission to bring open-source change data capture to all users, no matter which data streaming platform or commit log they are using. So we are very happy to have received a contribution which adds support for Pravega to Debezium Server. A Cloud Native Computing Foundation (CNCF) sandbox and Apache 2.0 licensed open-source project, Pravega describes itself as a "storage abstraction for continuously generated and unbounded data".

    The Debezium Server Pravega sink adapter offers two modes: non-transactional and transactional. The non-transactional mode individually writes each event in a Debezium batch to Pravega. The transactional mode writes the Debezium batch to a Pravega transaction that commits when the batch is completed.

    To learn more about using Debezium with Pravega, please refer to the documentation. Many thanks to Derek Moore for this fantastic contribution!

    Oracle Snapshotting Improvements

    The Debezium connector for Oracle received two improvements related to snapshotting:

    • Support for the snapshot.include.collection.list option (DBZ-3062); this allows creating an initial snapshot for only a subset of the tables captured by the connector

    • A new option, snapshot.locking.mode, which provides control over the locking behavior when the connector captures the schema of the tables (DBZ-3557); in particular, this allows disabling locking completely, which is very useful if you can guarantee that no DDL changes happen while the connector is taking the (schema) snapshot

    In addition, there’s several bug fixes for this connector, including a few ones related to DDL and DML parsing (DBZ-3545, DBZ-3549, DBZ-3554, DBZ-3606), handling of RAC installations (DBZ-3563, DBZ-3599), and more efficient handling of LOB columns (DBZ-3556).

    Further Improvements and Bugfixes

    The Debezium connector for SQL Server saw two performance-related improvements (DBZ-3486, DBZ-3515). The schemas of change events from the Postgres connector now contain default values, based on the source column definition (DBZ-2790). This comes in handy, for instance, when deriving downstream table schemas from a change event stream.

    Other fixes include correct identification of primary members in MongoDB replica sets (DBZ-3522), support for the JSON function in the MySQL connector’s DDL parser (DBZ-3559), and the upgrade of the Debezium Quarkus extension for implementing the outbox pattern to Quarkus 2.0 (DBZ-3550).

    Overall, 48 issues have been addressed in Debezium 1.6.0.Beta2. We’re deeply grateful to all the community members contributing to this release:

    With Beta2 out the door, we’re entering the stabilization phase for the 1.6 release cycle. You can expect one or two CRs (candidate releases) before the final release, which is planned for the end of the month, barring any unforeseen complications of course. Besides some more bug fixes and documentation improvements, we’re also intending to upgrade to Apache Kafka 2.8, which will allow you to take a sneak peek at using Debezium with ZooKeeper-less Kafka!

    In parallel, we’re going to work on the roadmap for Debezium 1.7 (due by the end of September). Please get in touch via the mailing list if you have specific feature requests for this release!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/06/24/debezium-1-6-cr1-released/index.html b/blog/2021/06/24/debezium-1-6-cr1-released/index.html index 0553e0e57f..217c4d3bf4 100644 --- a/blog/2021/06/24/debezium-1-6-cr1-released/index.html +++ b/blog/2021/06/24/debezium-1-6-cr1-released/index.html @@ -1 +1 @@ - Debezium 1.6.0.CR1 Released

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

This release adds skipped operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Skipped operations optimizations - SQL Server

    Debezium introduced the notion of skipped.operations some time ago. This feature allows connectors to control what change events are emitted during the streaming phase, optionally omitting certain change types such as updates or deletes.

The skipped.operations feature was originally implemented much like an SMT: a filter applied toward the end of the CDC pipeline, just before an event was emitted. While this worked well, there was an optimization to be had here, because SQL Server records the change type in the capture table alongside the change data. Using this knowledge, the query to fetch changes from the capture table was adjusted so that the changes returned from SQL Server are pre-filtered based on these "skipped operations".

Overall, this optimization should both improve the time spent during iterations of capturing changes when the connector is configured to use skipped.operations and reduce the overall garbage collection the connector may incur over its lifetime, since fewer objects are generated only to be discarded later.
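
As a small illustration, a SQL Server connector that is only interested in inserts could skip update and delete events with the following setting (u and d being the usual Debezium operation codes for updates and deletes):

skipped.operations=u,d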

    Many thanks to Hossein Torabi for identifying this optimization and contributing it!

    Heartbeat Support for the Oracle Connector

Debezium already supports heartbeats across its other connectors; this feature is now available for Oracle, too!

A heartbeat is a critical part of ensuring that a connector’s offsets remain synchronized with Kafka. Normally, the offsets are automatically maintained by Kafka Connect when we emit an event to a topic. But there are circumstances where event emission can be infrequent enough that stale offsets become a concern.

Regardless of which adapter the Debezium Oracle connector is deployed to use, there are situations where the connector sees events and updates its in-memory offset state, but because no event is emitted to Kafka, the offsets aren’t synchronized to Kafka Connect. A typical example is a connector capturing changes for a table that changes very infrequently compared to other tables in the data source.

    In order to guarantee that offsets remain synchronized with Kafka Connect, specifically during periods of low capture activity, the heartbeat.interval.ms configuration option can be set to periodically keep offsets in sync.
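
For example, to have the connector emit a heartbeat roughly every ten seconds (the interval chosen here is purely illustrative):

heartbeat.interval.ms=10000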

    Oracle BLOB/CLOB Support now Opt-In

While we added support for the BLOB and CLOB data types to the Debezium connector for Oracle in an earlier 1.6 pre-release, we consider the work towards fully supporting these types to be still ongoing.

There is a certain amount of overhead the connector incurs when handling BLOB and CLOB fields. Transactions which contain these data types require a reconciliation step at commit time to merge certain events into a single logical emitted event. Additionally, LOB values must be kept in memory while the event to be emitted is constructed, which adds to the connector’s memory footprint. Furthermore, users may not want to have LOB data emitted at all, due to the size of the data.

So with that, the Oracle connector’s support for LOB data types is now an opt-in only feature. This means that out of the box the connector won’t attempt to capture LOB column data. If LOB columns need to be captured by the connector, the connector option lob.enabled must be set to true so that the connector will pick up and process those column types.
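
In other words, capturing LOB columns requires explicitly opting in via the connector configuration:

lob.enabled=true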

Lastly, we also felt that, since there is still some ongoing effort for BLOB and CLOB support, it made sense to denote LOB support as incubating for the short term until we believe the solution has matured. We strongly encourage users who want to capture LOB-based columns to enable LOB support, give it a test drive, and provide any and all feedback, good or bad, so that we can continue to improve support for LOB columns.

    Further Improvements and Bugfixes

    The Debezium connector for Oracle also saw quite a number of small improvements (DBZ-3612, DBZ-3616, DBZ-3619, DBZ-3631). These improvements focused on improving the DDL parser as well as logging.

    With the recent release of Quarkus 2.0.0.Final, the Quarkus Outbox extension is now based on Quarkus 2.0 (DBZ-3602).

    Overall, 27 issues have been addressed in Debezium 1.6.0.CR1. We’re deeply grateful to all the community members contributing to this release:

With CR1 out, we’re nearing the end of the stabilization phase for the 1.6 release cycle. You can expect possibly one more CR (candidate release) before the final release, which is planned for the end of the month, barring any unforeseen complications, of course. Besides some more bug fixes and documentation improvements, we’re also intending to upgrade to Apache Kafka 2.8, which will allow you to take a sneak peek at using Debezium with ZooKeeper-less Kafka!

    In parallel, we’re going to work on the roadmap for Debezium 1.7 (due by the end of September). Please get in touch via the mailing list if you have specific feature requests for this release!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/06/30/debezium-1-6-final-released/index.html b/blog/2021/06/30/debezium-1-6-final-released/index.html index 2e96363871..aa833b25f3 100644 --- a/blog/2021/06/30/debezium-1-6-final-released/index.html +++ b/blog/2021/06/30/debezium-1-6-final-released/index.html @@ -1 +1 @@ - Debezium 1.6.0.Final Released

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    • Ability to resume an on-going snapshot after a connector restart

    • Re-snapshot selected tables during streaming, e.g. to re-bootstrap Kafka topics with change events for specific tables

    • Snapshot tables newly added to the list of captured tables after changing the filter configuration

    • Begin to stream changes while an initial snapshot is running

Incremental snapshotting is an incubating feature as of Debezium 1.6, and we’re looking forward to your feedback on it. To learn more about this functionality, please refer to the individual connector docs, e.g. for the Debezium MySQL connector. There are already some follow-up improvements in this area in the works, for instance the usage of MySQL GTIDs for setting the high/low watermarks required for this snapshotting approach, which will avoid the need for write access to the database by the connector. You can expect these improvements to be rolled out in one of the upcoming 1.7 preview releases.
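
As a rough sketch of the configuration side of this feature (the signaling table name is a placeholder and the chunk size is just an example value; please refer to the connector documentation for the authoritative option names and defaults), a connector using incremental snapshots points at a signaling table and can optionally tune the chunk size:

# fully-qualified name of the signaling table (placeholder)
signal.data.collection=inventory.debezium_signal
# number of rows fetched per incremental snapshot chunk (example value)
incremental.snapshot.chunk.size=1024

An ad-hoc incremental snapshot is then triggered by inserting an execute-snapshot signal record for the desired tables into that signaling table.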

Besides incremental snapshotting, other new features in Debezium 1.6 include two brand new Debezium Server sinks, one for Apache Kafka and another for Pravega, as well as several notable enhancements to the Debezium connector for Oracle, which include reacting to DDL schema changes and an opt-in, incubating feature to emit BLOB and CLOB column data types. There are also improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements. We’ve also upgraded the Debezium Quarkus extension for implementing the outbox pattern to Quarkus 2.0.

    Across all the 1.6 preview and the final releases, a grand total of 188 issues has been addressed.

    For more details, please see the earlier announcements for the 1.6.0 Alpha1, Beta1, Beta2, and CR1 releases.

    Since the CR1 release, we’ve primarily focused on documentation improvements and some bug fixes. But there was one last-minute feature addition, too, which allows you to specify archive log locations (DBZ-3661) for the Oracle connector.

    Please refer to the release notes of Debezium 1.6.0.Final for the complete list of resolved issues as well as procedures for upgrading from earlier versions.

As always, a big thank you to all the members from the community who helped with this release, be it via code contributions, bug reports, testing, providing insight and expertise, etc. Kudos to the following individuals from the community who contributed to Debezium 1.6, bringing the overall number of contributors to the Debezium core repository to 277:

    Outlook

    As always, Debezium will be following its normal quarterly release cadence with Debezium 1.7 planned by the end of September. There are a couple of key issues we intend to work on over the course of this version including support for incremental snapshots with the MongoDB and Oracle connectors. Additionally, we plan to explore some new buffering options for the Oracle connector’s LogMiner-based implementation, work on a tool for compacting large schema history topics, expand the feature set of Debezium UI, and much more.

In the coming week(s), keep an eye on our roadmap as we’ll be refining it in preparation for Debezium 1.7. If you have any specific feature requests or other input for the roadmap and future releases, please let us know via the mailing list!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/07/07/debezium-newsletter-01-2021/index.html b/blog/2021/07/07/debezium-newsletter-01-2021/index.html index 5f99aeef31..d6cca000dc 100644 --- a/blog/2021/07/07/debezium-newsletter-01-2021/index.html +++ b/blog/2021/07/07/debezium-newsletter-01-2021/index.html @@ -1 +1 @@ - Debezium Community Newsletter 01/2021

    Welcome to the newest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    It’s been a long time since our last edition. But we are back again! In case you missed our last edition, you can check it out here.

    Upcoming Events

Due to the ongoing global pandemic, all the conferences and meet-ups have gone virtual. On the bright side, this means you get to attend some nice events from the comfort of your couch:

    If you’d like to have a session on Debezium at your virtual meetup or conference, please get in touch!

    Articles

    There have been several blog posts about Debezium lately; here are some of the latest ones that you should not miss:

    And if watching a talk is more your kind of thing, here’s the recording of the session Change Data Streaming Patterns in Distributed Systems from this year’s Berlin Buzzwords, by Gunnar Morling and Hans-Peter Grahsl:

    Please also check out our compiled list of resources around Debezium for even more related posts, articles, podcasts and presentations.

    Integrations

    A few cool integrations and usages of Debezium appeared over the last few weeks and months. Here are several ones which we found especially fascinating:

    Examples

If you are getting started with Debezium, you can get hands-on learning and a better understanding of how things work from the examples and demos in our examples repository. We have introduced several new examples and updated the existing ones; we’d like to highlight some new additions:

    If you are interested in showcasing a new demo or an example, please send us a GitHub pull request or reach out to us directly through our community channels found here.

    Time to Upgrade

Debezium version 1.6.0.Final was released last week. Apart from Debezium Server sinks for Apache Kafka and Pravega, the 1.6 release brought a brand-new feature for incremental and ad-hoc snapshots, providing long-awaited capabilities like resuming long-running snapshots after a connector restart, re-snapshotting selected tables during streaming, and snapshotting tables newly added to the list of captured tables after changing the filter configuration. A big shout-out to Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou for their paper DBLog: A Watermark Based Change-Data-Capture Framework, upon which incremental snapshotting is based.

    Given the long time since the last community newsletter, it’s also worth mentioning some of the new features added in Debezium 1.5, released in April this year: the MySQL connector saw a substantial rewrite, now also supporting transaction marker events, Debezium’s LogMiner-based CDC implementation for Oracle was declared stable, and we’ve added support for Redis Streams to Debezium Server.

    If you are using an older version, we urge you to check out the latest major release. For details on all the bug fixes, enhancements, and improvements, check out the release-notes.

The Debezium team has also begun active development on the next version, 1.7. The major focus in 1.7 is implementing incremental snapshotting for more connectors (MongoDB, Oracle), reworking the transaction buffer for the Oracle connector, and expanding the Debezium UI. For details on the upcoming release, check out the Debezium roadmap.

    You can keep track of bug fixes, enhancements, and changes that will be coming up in the 1.7 release by visiting our releases page.

    Getting Involved

Getting started with a huge existing code base can be intimidating, but we want to make sure that the process of getting started is as easy and smooth as possible. We are now a vibrant community with 270+ contributors overall, and we welcome all kinds of community contributions, discussions, and enhancements. As a beginner, you can grab some of the issues labeled with easy-starter if you want to dive in quickly. Below is a list of issues that are open to grab:

    • Document "schema.include.list"/"schema.exclude.list" for SQL Server connector (DBZ-2793)

    • Limit log output for "Streaming requested from LSN" warnings (DBZ-3007)

    • Create smoke test to make sure Debezium Server container image works (DBZ-3226)

    • Add signal table automatically to include list (DBZ-3293)

    • Implement support for JSON_TABLE in MySQL parser (DBZ-3575)

    • Implement window function in MySQL parser (DBZ-3576)

    • Standardize "snapshot.fetch.size default" values across connectors (DBZ-3694)

    If you are new to open source, please check out our contributing guidelines to get started!

    Call to Action

    Our community users page includes a variety of organizations that are currently using Debezium. If you are a user of Debezium, and would like to be included, please send us a GitHub pull request or reach out to us directly through our community channels found here.

    And if you haven’t yet done so, please consider adding a ⭐ for the GitHub repo; keep them coming, we’re almost at 5,000 stars!

    Also, we’d like to learn about your requirements for future Debezium versions. In particular, we’d be very curious about your feedback on the CDC-based Sagas approach mentioned above. Is it something you’d like to see supported in our Quarkus extension for instance? Please let us know about this, as well as any other feedback you may have, via the Debezium mailing list.

Lastly, we’re planning to continue our interview series Debezium Community Stories With…​; so if you have exciting stories to tell about your usage of Debezium, please reach out!

And as always, stay safe and healthy. We wish you and your loved ones good health and strength.

    Anisha Mohanty

    Anisha is a Software Engineer at Red Hat. Currently working with the Debezium Team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/08/02/debezium-1-7-alpha1-released/index.html b/blog/2021/08/02/debezium-1-7-alpha1-released/index.html index ec2833774c..0dd1095500 100644 --- a/blog/2021/08/02/debezium-1-7-alpha1-released/index.html +++ b/blog/2021/08/02/debezium-1-7-alpha1-released/index.html @@ -1,2 +1,2 @@ Debezium 1.7.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    Alternative Oracle Connector LogMiner processors

    The Debezium connector for Oracle uses an in-memory buffer to cache in-progress transaction changes until the transaction is either committed or rolled back. This cache can require a significant memory footprint depending on the number of events in the transaction, how many concurrent transactions are in-progress, as well as the data itself that represents each event such as large character or binary objects. This can be tough to manage for some environments that have ongoing long-running transactions.

This release introduces a significant refactoring of the connector’s processing code that allows for varied modes of processing LogMiner change events. Out of the box, the memory-based implementation remains the default, but we’ve included a new implementation based on the Infinispan distributed data store. This implementation makes it possible to track any number of in-progress transactions regardless of their size or duration.

    To take advantage of the new Infinispan implementation, the following configuration options must be provided:

    log.mining.buffer.type=infinispan
log.mining.buffer.location=/path/to/shared/storage

    As this is early work, the new Infinispan buffer mode currently has a few limitations:

    • The Infinispan configuration is not exposed outside to the user for tuning

• Only a file-based cache store is supported; if you work with multi-node Kafka Connect clusters, a networked filesystem must be used for the cache store in order to support rebalancing of connector tasks in the Connect cluster

This should not stop you from giving it a test drive and sending us as much feedback as possible for further improvements. We expect to remove the above restrictions in the next release.

    Incremental Snapshotting

The major feature of the 1.6 release has also received new improvements. The Debezium connector for Oracle now supports incremental snapshots in the same way as the other connectors, so Oracle users can now easily add new tables to the capture list and get them snapshotted on the fly.

For the MySQL connector, there is a new incremental snapshot mode that can be used for databases that do not allow writing to the signal table. Kate Galieva devised a method based on GTID lists that provides watermarking without writing to the database. Thank you for the nice contribution!

    Altogether, 49 issues were fixed for this release. A big thank you goes out to all the community members who contributed: Blake Peno, Alfusainey Jallow, Bingqin Zhou, Hossein Torabi, Katerina Galieva, Kyley Jex, Martín Pérez, Naveen Kumar KR, Patrick Chu, Pavel Strashkin, Raphael Auv, Sergei Morozov, Thiago Dantas, Tin Nguyen, Tommy Karlsson, WenChao Ke, and yangsanity.

For the upcoming 1.7 preview releases, we’re planning to focus on completing the follow-up tasks for the Oracle LogMiner processor and on providing support for a JDBC-based processor, too. We will also explore incremental snapshotting support for MongoDB and do research on MariaDB support.

    Release of 1.6.1.Final

While the team’s focus is primarily on 1.7, we have recently fixed 9 issues in the 1.6 stream and released 1.6.1.Final. You can check out the release notes for details. If you’re using a release prior to 1.6, we recommend taking this opportunity to upgrade and get access to the latest new features!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/08/12/introducing-debezium-ui/index.html b/blog/2021/08/12/introducing-debezium-ui/index.html index a722a7315e..be622b86d6 100644 --- a/blog/2021/08/12/introducing-debezium-ui/index.html +++ b/blog/2021/08/12/introducing-debezium-ui/index.html @@ -1 +1 @@ - Introducing the Debezium UI

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    Let’s take a closer look at some features of the UI in the following.

    UI Connector List

    The main page of the UI displays all the registered connectors. Some of the highlights of the main page are as follows:

• The Kafka Connect cluster can be selected via the dropdown in the header.

• The connector table shows each connector with its type (MySQL, PostgreSQL, MongoDB), its status, and its tasks.

• A connector row can be expanded to show more details, as shown below with the 'testPostgres' connector. Metrics are shown in the expansion area (note: this feature is still under development and not functional yet). Connector tasks are shown, with the ability to restart a task if desired.

    • The kebab menu at the right of each connector row provides actions which allow the user to Pause, Resume, Restart or Delete the connector.

    UI Create Connector Wizard

    The user can create a connector by clicking on the Create a connector button on the main page. The first two steps of the wizard are required, but the remaining steps are optional. Each step will validate the user entries and provide feedback if there are problems. After completing steps 1 and 2 successfully, the user can proceed to the final page to review and create the connector.

    Create Connector - Connector type (required)

    Choose the type of connector in step 1. Currently the MongoDB, MySQL and PostgreSQL connector types are supported. Addition of more connector types is currently in progress.

    Create Connector - Properties (required)

    The basic connection properties for the selected connector are entered in step 2, and the properties must be validated before proceeding. Advanced connection properties are also provided in a separate section of this step. Upon successful validation, the user may proceed to the next steps (Additional properties) - or they can elect to bypass the additional properties and proceed directly to Review.

    Create Connector - Additional properties (optional)

    The Additional properties are optional and can be summarized as follows:

    • Filter definition - entry of regular expressions which define the filters for inclusion/exclusion of the items that will be included for CDC. The included items are displayed as the filters are entered and applied.

    • Data options - Snapshot and Mapping properties (optional). The defaults can be viewed and changed if desired.

    • Runtime options - Engine and Heartbeat properties (optional). The defaults can be viewed and changed if desired.

    Create Connector - Review

The Review step provides a summary of the configuration that will be used to create the connector. If you are happy with the selections, click 'Finish' to create the connector. If the properties need adjustment, navigate back to the earlier steps.

    Design Considerations

    The UI is implemented as a Quarkus-based web application. The backend is configured with the URL(s) of one or more Kafka Connect clusters and provides a REST interface for the frontend. The frontend user interface uses ReactJS as the primary technology, utilizing Patternfly react components and design patterns.

As with everything in Debezium, the UI is fully open source (Apache License, Version 2.0). You can find the UI source code under the Debezium organization on GitHub.

    Trying It Out Yourself

    Debezium UI Container Image

The Debezium UI container image is available for running the UI. E.g., run the following command to start the UI and connect it to an existing Kafka Connect instance via Docker (where KAFKA_CONNECT_URI supplies a comma-separated list of the available Kafka Connect URI(s)):

    $ docker run -it --rm --name debezium-ui -p 8080:8080 -e KAFKA_CONNECT_URI=http://connect:8083 debezium/debezium-ui:1.7

    The UI connects to Kafka Connect via REST, so you need to make sure that the latter is reachable, e.g. by running both components on the same Docker network.

Currently, the UI connects to unauthenticated Kafka Connect instances. Also, there’s no authorization or authentication implemented in the UI itself yet. Until that is the case, you should secure the components yourself, e.g. with your own proxy for authorization, if needed.

    Self-contained example

    We have also created a self-contained example UI demo, which is included under debezium-examples on Github. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.

To learn more about the Debezium UI, please refer to the reference documentation.

    Next Steps

    We plan to continue with improvements and new features for the UI in the coming releases. Some items under consideration:

    • Incorporation of more Debezium connector types, such as the ones for SQL Server and Oracle

    • Add capability to configure topic creation settings and single message transformations

    • Addition and improvement of connector metrics and monitoring

    • Add capability for viewing and editing connector properties after creation

    • …​And more!

    We’d also be very happy to learn about your requirements and feedback on the Debezium UI. Please let us know in the comments below, or send a message to our mailing list.

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, June Zhang, Na Ding, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve ours existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

Introducing the Debezium UI

    We are pleased to announce the first official release of the Debezium graphical user interface!

As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could improve the getting-started and operational experience of Debezium users.

Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give users insight into the state of the running Debezium connectors, allowing them to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve, as users need to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    Let’s take a closer look at some features of the UI in the following.

    UI Connector List

    The main page of the UI displays all the registered connectors. Some of the highlights of the main page are as follows:

• The Kafka Connect cluster can be selected via the dropdown in the header.

• The connector table shows each connector with its type (MySQL, PostgreSQL, MongoDB), connector status and connector tasks.

• A connector row can be expanded to show more details, as shown below with the 'testPostgres' connector. Metrics are shown in the expansion area (note: this feature is still under development and not functional yet). Connector tasks are shown, with the ability to restart a task if desired.

    • The kebab menu at the right of each connector row provides actions which allow the user to Pause, Resume, Restart or Delete the connector.
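
For reference, these actions correspond to the standard Kafka Connect REST API operations, which you could also issue directly against the Connect cluster, e.g. (using the 'testPostgres' connector and the Connect URL from the examples in this post):

    $ curl -X PUT    http://connect:8083/connectors/testPostgres/pause     # pause the connector
    $ curl -X PUT    http://connect:8083/connectors/testPostgres/resume    # resume it again
    $ curl -X POST   http://connect:8083/connectors/testPostgres/restart   # restart the connector
    $ curl -X DELETE http://connect:8083/connectors/testPostgres           # delete the connector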

    UI Create Connector Wizard

    The user can create a connector by clicking on the Create a connector button on the main page. The first two steps of the wizard are required, but the remaining steps are optional. Each step will validate the user entries and provide feedback if there are problems. After completing steps 1 and 2 successfully, the user can proceed to the final page to review and create the connector.

    Create Connector - Connector type (required)

    Choose the type of connector in step 1. Currently the MongoDB, MySQL and PostgreSQL connector types are supported. Addition of more connector types is currently in progress.

    Create Connector - Properties (required)

    The basic connection properties for the selected connector are entered in step 2, and the properties must be validated before proceeding. Advanced connection properties are also provided in a separate section of this step. Upon successful validation, the user may proceed to the next steps (Additional properties) - or they can elect to bypass the additional properties and proceed directly to Review.

    Create Connector - Additional properties (optional)

    The Additional properties are optional and can be summarized as follows:

    • Filter definition - entry of regular expressions which define the filters for inclusion/exclusion of the items that will be included for CDC. The included items are displayed as the filters are entered and applied.

    • Data options - Snapshot and Mapping properties (optional). The defaults can be viewed and changed if desired.

    • Runtime options - Engine and Heartbeat properties (optional). The defaults can be viewed and changed if desired.

    Create Connector - Review

    The Review step provides a summary of the configuration that will be used to create the connector. If happy with the selections, click 'Finish' to create the connector. If the properties need adjustment, navigate back to the earlier steps.
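
Under the hood, creating the connector amounts to registering a regular connector configuration with Kafka Connect. As a rough sketch, the reviewed configuration for a PostgreSQL connector might end up looking like the following when posted to the Connect REST API (host name, credentials and the include list are illustrative values):

    $ curl -X POST -H "Content-Type: application/json" http://connect:8083/connectors -d '{
        "name": "testPostgres",
        "config": {
          "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
          "database.hostname": "postgres",
          "database.port": "5432",
          "database.user": "postgres",
          "database.password": "postgres",
          "database.dbname": "inventorydb",
          "database.server.name": "dbserver1",
          "table.include.list": "inventory.customers"
        }
      }'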

    Design Considerations

    The UI is implemented as a Quarkus-based web application. The backend is configured with the URL(s) of one or more Kafka Connect clusters and provides a REST interface for the frontend. The frontend user interface uses ReactJS as the primary technology, utilizing Patternfly react components and design patterns.

    As with everything in Debezium, the UI is fully open source (Apache License Version 2.0). You can find the UI Source Code under the Debezium organization on Github.

    Trying It Out Yourself

    Debezium UI Container Image

The Debezium UI container image is available for running the UI. For example, run the following command to start the UI and connect it to an existing Kafka Connect instance via Docker (where KAFKA_CONNECT_URI supplies a comma-separated list of the available Kafka Connect URI(s)):

    $ docker run -it --rm --name debezium-ui -p 8080:8080 -e KAFKA_CONNECT_URI=http://connect:8083 debezium/debezium-ui:1.7

    The UI connects to Kafka Connect via REST, so you need to make sure that the latter is reachable, e.g. by running both components on the same Docker network.
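
For instance, with plain Docker you could create a user-defined network and attach both containers to it (the network name is arbitrary, and a Connect container named "connect" is assumed to be running already):

    $ docker network create debezium-net
    $ docker network connect debezium-net connect
    $ docker run -it --rm --name debezium-ui -p 8080:8080 --network debezium-net \
        -e KAFKA_CONNECT_URI=http://connect:8083 debezium/debezium-ui:1.7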

Currently, the UI can only connect to unauthenticated Kafka Connect instances. Also, there’s no authorization or authentication implemented in the UI itself yet. Until that is the case, you should secure the components yourself if needed, e.g. by putting them behind your own proxy for authorization.

    Self-contained example

    We have also created a self-contained example UI demo, which is included under debezium-examples on Github. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.
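
Getting the demo up and running should boil down to something like the following (the directory name is an assumption - check the repository README for the exact location):

    $ git clone https://github.com/debezium/debezium-examples.git
    $ cd debezium-examples/ui-demo    # directory name assumed, see the repository README
    $ docker-compose up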

To learn more about the Debezium UI, please refer to the reference documentation.

    Next Steps

    We plan to continue with improvements and new features for the UI in the coming releases. Some items under consideration:

• Support for more Debezium connector types, such as the ones for SQL Server and Oracle

• The ability to configure topic creation settings and single message transformations

• Additional and improved connector metrics and monitoring

• The ability to view and edit connector properties after creation

• …and more!

    We’d also be very happy to learn about your requirements and feedback on the Debezium UI. Please let us know in the comments below, or send a message to our mailing list.

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, June Zhang, Na Ding, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium Community Stories With... Sergei Morozov

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    Sergei, could you introduce yourself? What is your job, if you’re not contributing to Debezium?

    Hi, my name is Sergei, I’m a Software Architect at SugarCRM. Most of my career, I’ve been building software based on the LAMP stack. A few years ago, my team and I started building a data streaming platform meant to integrate the existing SugarCRM products and the new services we wanted to build on top of them. We started prototyping the platform with Maxwell’s Daemon, AWS Kinesis and DynamoDB and later switched to Kafka, Kafka Connect and Debezium.

    Interestingly, Debezium was the reason why we started experimenting with the Kafka ecosystem. The solution we had built before the pivot was only capable of streaming CDC changes but not snapshotting the initial state. During the work on snapshotting, we stumbled upon Debezium and discovered Kafka. After some experimentation and learning more about the ecosystem, we decided to switch the technology stack.

    What are your use cases for Debezium and change data capture in your current project?

    We capture data changes from the products based on MySQL and SQL Server and use them to enable AI and data analytics use cases. Apart from processing recent changes, we store as much historical data as possible. The data comes from thousands of customer databases hosted in the cloud environment.

    We use it for AI, analytics, and enabling future use cases. For instance, SugarPredict provides scoring of opportunities and helps sales representatives to focus on those that are more likely to close. The historical data from the CRM and other sources is used to train the AI models. The data change events are used to run the scoring process and update the prediction.

    From the data flow perspective, it looks very simple but there are quite some engineering challenges caused by the flexibility of the products and the cloud scale.

    This sounds really interesting; can you tell us more about the challenges you encountered and how you solved them?

    Absolutely. Let me dive into the details a bit. I hope our ideas and solutions will be helpful to the community.

    Flexibility and Data Serialization

The products that provide data changes are extremely customizable. Customers can create new modules and fields, install extensions, and so on, which from the CDC standpoint means that the customers have full control over the database schema. Combined with the scale of thousands of customers, this makes it challenging to use Apache Avro, which implies that the schema is managed by the developers.

A couple of years ago, we tested the then de-facto standard Schema Registry and concluded that it wouldn’t perform well at the scale of roughly a million message schemas we’d have in the cloud, not even counting schema versions, the number of which is unbounded. For comparison, the accompanying managed offering for that schema registry allows storing up to a thousand schemas. So we resorted to using JSON to serialize data.

    Onboarding Challenges

SugarCloud is a multi-tenant hosting environment for SugarCRM products. It consists of a few dozen large MySQL-compatible AWS Aurora clusters that usually host a hundred to a thousand customer databases each. The cluster storage size varies from a few hundred gigabytes to 5 terabytes.

When a Debezium connector for MySQL first starts, it performs the initial consistent snapshot, and to guarantee the consistency, it usually obtains a short-lived global read lock for capturing the schema of all relevant tables. Since AWS Aurora doesn’t allow taking a global lock, Debezium has to lock all tables individually for the entire duration of the snapshot.

The snapshot of a database cluster would take from a few hours to a couple of days, which we cannot afford because it would require downtime of all the customer instances hosted on a given cluster. Fortunately, we stumbled upon the great article Debezium MySQL Snapshot For AWS RDS Aurora From Backup Snaphot by The Data Guy that describes a workaround that allowed us to snapshot all the data without causing any application downtime. We implemented a shell script that clones the database cluster, records the position in the binlog from which the clone was made, takes a snapshot of the clone and then reconfigures the connector to stream from the position of the snapshot.
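
The script itself is specific to the SugarCRM environment, but the general shape of such a workaround, expressed with the AWS CLI and standard MySQL tooling (all identifiers here are made up), is roughly:

    # 1. Create a copy-on-write clone of the production Aurora cluster
    #    (an instance still needs to be added to the clone cluster before connecting to it)
    $ aws rds restore-db-cluster-to-point-in-time \
        --source-db-cluster-identifier prod-cluster \
        --db-cluster-identifier prod-cluster-clone \
        --restore-type copy-on-write \
        --use-latest-restorable-time

    # 2. Record the binlog position the clone corresponds to,
    #    e.g. by running SHOW MASTER STATUS against the clone before anything writes to it
    $ mysql -h prod-cluster-clone.cluster-abc123.us-west-2.rds.amazonaws.com \
        -u admin -p -e "SHOW MASTER STATUS"

    # 3. Let a connector snapshot the clone (no locks on production needed), then point the
    #    connector at the production cluster starting from the recorded binlog file/position,
    #    and finally delete the clone.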

    Instance Lifecycle Management

    SugarCloud is a very dynamic environment. Once a customer database has been deployed to one of the clusters, there’s no guarantee that it will remain there during its entire lifetime. A database can be backed up and restored. It can be moved between clusters in the same AWS region for load-balancing purposes. It can be moved from one AWS region to another if requested by the customer.

    Our source connectors are configured to capture all data changes from all databases on a given cluster but not all of them make sense from the data consumers' standpoint. For instance, when a database is restored from a backup on a different cluster, the INSERT statements generated by mysqldump don’t represent new rows. They represent the state of the database during the backup and should be ignored.

    In order to enable post-processing of the raw data, there is a system database on each of the clusters where the cluster management system logs all events relevant to the instance lifecycle (see the outbox pattern).

In order to post-process the raw data according to the lifecycle events, we built a Kafka Streams application that is deployed between Debezium and the actual data consumers. Internally, it uses a state store which is effectively a projection of each customer database's status (active/maintenance). Prior to restoring a database from a SQL dump, the database is marked as "in maintenance" (an event is emitted to the outbox), so all corresponding INSERTs are ignored until the maintenance is over (another event is emitted).

    Storage

    The need to store all historical data brings the challenge of having enough storage. Since the end of last year, we’ve collected more than 120TB of compressed CDC events. Currently we store historical data in S3 but plan to move it back to Kafka once S3-backed tiered storage (KIP-405) is available in AWS MSK.

    Infrastructure

We run our software primarily in Kubernetes and manage all of our Kafka-related infrastructure other than the brokers themselves with Strimzi. Strimzi not only allows managing applications and Kafka resources using the same tools, it also provides a great foundation for automation.

    When we started designing the data streaming platform, one of the requirements was that it should automatically adjust to certain changes in SugarCloud. For instance, when a new Aurora cluster is deployed, the data streaming pipeline should be deployed for this cluster. Another requirement was that the pipeline should be deployed in multiple AWS regions and be managed via Sugar’s single control plane, codenamed Mothership. We went one level deeper and built the Mothership Operator that serves as the API for managing the pipeline.

    When a new Aurora cluster is created, Mothership creates a secret in Vault with the database credentials and a StackIngestor. The StackIngestor contains the information about the Aurora cluster: its AWS region, MySQL endpoint, the name of the Vault secret and other technical information. Mothership Operator subscribes to the changes in StackIngestors and manages the Kafka resources that implement the pipeline.

With some exceptions, each pipeline is deployed to the same AWS region where the Aurora cluster is located. There are Strimzi Topic and Cluster operators deployed in each region. The pipeline consists of a few Kafka topics, a source connector (Debezium), a sink connector (S3) and runs on a shared or a dedicated Kafka Connect cluster. For each StackIngestor created in the primary region, Mothership Operator creates the needed Strimzi resources in the regional Kubernetes cluster. The Strimzi operators subscribe to the updates in their resources and manage the corresponding resources in Kafka.
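
To give an idea of what the operators manage, a per-cluster source connector could be described declaratively with a Strimzi KafkaConnector resource along these lines (a sketch with made-up names and a heavily trimmed configuration, not the actual resources used at SugarCRM):

    $ kubectl apply -f - <<'EOF'
    apiVersion: kafka.strimzi.io/v1beta2
    kind: KafkaConnector
    metadata:
      name: stack-ingestor-example
      labels:
        strimzi.io/cluster: connect-us-west-2   # the Kafka Connect cluster managed by Strimzi
    spec:
      class: io.debezium.connector.mysql.MySqlConnector
      tasksMax: 1
      config:
        database.hostname: prod-cluster.cluster-abc123.us-west-2.rds.amazonaws.com
        database.port: 3306
        database.user: debezium
        database.password: change-me
        database.server.id: 184054
        database.server.name: prod-cluster
        database.include.list: customer_db_1,customer_db_2
        database.history.kafka.bootstrap.servers: kafka:9092
        database.history.kafka.topic: schema-changes.prod-cluster
    EOF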

    Figure 1. System Overview

We also use Strimzi to export JMX metrics from Debezium to Prometheus. The Prometheus metrics are visualized in Grafana. We started with a community dashboard (also by The Data Guy) and improved it to better fit the multi-tenant use case.

    Figure 2. Multi-Tenant Debezium Dashboard

    You’re not only using Debezium but you’ve also contributed to the project. How was your experience doing so?

    In my experience, whatever open-source software I touch – be it at work or for fun – I always end up finding something about that software that needs to be improved to enable my use case.

    I contributed one of my first patches to Debezium (or, more precisely, to its dependency mysql-binlog-connector-java) back in October 2020. We had just rolled out one of our first connectors to production and had experienced an issue where the connector was consuming all available memory and crashing at a specific position in the binlog. The issue was quite pressing since we had a very limited time before the binlog compaction would kick in and we might start losing data. At the same time, we had just a basic understanding of the Debezium and Kafka Connect architecture and no experience with the Debezium internals.

    The whole team had swarmed in and figured out that the connector was misinterpreting a non-standard binlog event that AWS Aurora produced instead of ignoring it. Troubleshooting and finding the root cause was the hardest part. Getting the issue fixed and unit-tested was relatively easy. Although the change wasn’t that obvious, I’m glad it was accepted promptly with constructive feedback from the team.

    Are you doing other open-source work, too?

    I’m one of the maintainers of the most popular library for relational databases in PHP, Doctrine DBAL. I made my first contributions there while I was working on integrating the library into the core SugarCRM product and fixed some issues that blocked the integration. It took a few releases to get everything fixed, and at the end I got invited to the core team.

    Apart from that, I’ve been an occasional contributor to some open-source projects in the PHP ecosystem: primarily those that I would use daily like PHPBrew, PHPUnit, PHP_CodeSniffer, Vimeo Psalm and PHP itself.

    Is there anything which you’re missing in Debezium or which you’d like to see improved in the future?

While Debezium is a great tool that covers most of the industry-standard database platforms, one of the greatest challenges for our team was and still is scaling Debezium to the size of our customer base. The SQL Server connector is currently capable of handling only one logical database per connector. We have hundreds of customer databases hosted on SQL Server, but running a dedicated connector for each of them would require expensive infrastructure and would be hard to manage.

    Earlier this year, we started working with the Debezium team on improving the connector and making it capable of capturing changes from multiple databases and running multiple tasks. This way, instead of running hundreds of connectors, we could run a dozen or so. The original design is outlined in DDD-1.

    With these changes implemented, one of our production connectors captures changes from over a hundred databases. At the same time, we’re working on contributing the changes back upstream.

    Bonus question: What’s the next big thing in data engineering?

    Nowadays, especially in multi-tenant environments, it’s really hard to predict how much time it will take from "it works on my machine" to "it works at the cloud scale". I’m looking forward to the time when container orchestration and data streaming platforms become as simple to operate as they look on PowerPoint diagrams.

    Sergei, thanks a lot for taking your time, it was a pleasure to have you here!

    If you’d like to stay in touch with Sergei Morozov and discuss with him, please drop a comment below or follow and reach out to him on Twitter.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium 1.7.0.Beta1 Released

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. This release is also the first one tested with Apache Kafka 2.8.

    The Debezium container images for Apache Kafka and Kafka Connect have been updated to version 2.8, too. This means that you can test Debezium with the new ZooKeeper-less mode for running Kafka (KIP-500). We’ll share more details on that in a separate post shortly.

    A large number of bug fixes and quality improvements have been made for this release; one focus area was the Debezium connector for Oracle, which received several fixes including the ability to configure multiple Oracle RAC nodes with different ports (DBZ-3813), multiple DDL parser corrections (DBZ-3877, DBZ-3893), and improved updating of SCN offsets (DBZ-3876).

Other changes include performance improvements for the Debezium connectors for Postgres (DBZ-3870) and MongoDB (DBZ-3788), proper timezone conversions for change event timestamps in the connector for SQL Server (DBZ-3479), and more resilient handling of errors during connector start-up (DBZ-3823).

Furthermore, this release has a breaking change for the MySQL connector. The MySQL driver was updated to the latest version, 8.0.26, with DBZ-3833. This update comes with new timezone handling and new configuration options. Detailed information can be found in the MySQL docs.

Earlier this month, we added the Debezium UI to our regular release process. If you want to learn more about the Debezium UI, have a look at our recent release announcement.

    René Kerner

René is a software engineer at Red Hat. Before that, he worked as a software architect and engineer at trivago and as a consultant at Codecentric. Now he's part of the Debezium team. He lives in Mönchengladbach, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Going ZooKeeper-less with Debezium Container Image for Apache Kafka

    - CLUSTER_ID=5Yr1SIgYQz-b-dgRabWx4g
    - BROKER_ID=4
    - KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka-1:9093
    - NODE_ROLE=broker


    You can find extended versions of the two Compose files (combined, controller/broker) in the Debezium examples repository, also containing services for Kafka Connect and a Postgres database, and accompanied by instructions for running the Debezium tutorial with ZooKeeper-less Kafka.

As KRaft mode matures in Kafka 3.0 and later versions, we may make some adjustments to the container image so as to support the new mode of running Kafka in the best way possible. Eventually, the option to run with ZooKeeper will be removed, but it’ll be quite some more time until then.

To learn more about KRaft, refer to KIP-500 and related KIPs, which describe the feature and its design in great detail, the KRaft README file, the README of the Debezium 1.7 container image for Apache Kafka, and the aforementioned blog post "Exploring ZooKeeper-less Kafka".

    Many thanks to René Kerner for providing feedback while writing this post.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

Debezium 1.7.0.CR1 Released

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    Column Filtering During Snapshotting

    While the different Debezium connectors already had the capability to exclude specific columns of the captured tables from change events, these filters were only applied when processing the data within the connectors. For initial snapshots, a more efficient approach has been implemented now: tailored SQL SELECT statements will be executed for fetching only the actually included columns (DBZ-2525). This allows for significant performance gains when for instance excluding large BLOB columns from change events.
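
As an illustration, a connector that excludes a large binary column might carry configuration like the following (table and column names are made up); with this change, the excluded column is no longer fetched from the database during the initial snapshot either:

    "table.include.list": "inventory.documents",
    "column.exclude.list": "inventory.documents.raw_content"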

    Updated Container Image Base

    The Debezium container images for Apache Kafka, Kafka Connect, and Apache ZooKeeper are based on the Fedora 34 minimal container base image (DBZ-3939). This change became necessary as the previously used base image (derived from CentOS 7) was not maintained any longer. While this change will be transparent for most users of Debezium, some adjustments may be required for those users who derive their own custom images from the Debezium ones, e.g. when installing further packages using the operating system’s package manager. Please refer to the release notes for more details.

    Further Fixes

    As we’re approaching the 1.7 Final release, most changes have been centered around bug fixing and maturing the code base. Some of the resolved issues include:

• Support for INVISIBLE columns as available since MySQL 8.0.23 (DBZ-3623); we’ve used that occasion to also update the Debezium example image for MySQL to version 8.0 (DBZ-3936)

• The SQL Server connector allows for the usage of custom connection factories (DBZ-4001)

    • Several fixes to DML and DDL parsing for MySQL (DBZ-3969, DBZ-3984) and Oracle (DBZ-3892, DBZ-3962)

Going forward, we’re planning to do another CR (candidate release) in a few days, followed by Debezium 1.7.0.Final at the end of the month. We’ll primarily focus on bug fixing and some assorted performance optimizations. There’ll also be some exciting improvements to the Debezium UI, which should be wrapped up for the 1.7 Final release: support for the configuration of single message transforms (SMTs), as well as the ability to configure topic creation settings.

    In parallel, we’re working on the roadmap for Debezium 1.8, planned to be released by the end of the year. Please reach out in the comments below or on the mailing list if you’d like to raise specific feature requests for this release.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

"v1": {"value": 1},
"v2": null,
"v3": {"value": null}

    A null structure value represents that a column was not modified (v2 field). If the column was assigned a NULL value (v3 field), there will be a structure with a NULL value field. A non-null column assignment (v1 field) fills the contents of the value field. Such a format allows us to correctly represent all the possibilities and differentiate between assigning NULL and non-modification.

However, most sink connectors won’t be able to correctly parse such a structure. Therefore, we decided to develop our own SMT, based on Debezium’s New Record State Extraction SMT. Our ScyllaExtractNewState SMT works by applying Debezium’s New Record State Extraction and flattening the {"value": …} structures (at the expense of not being able to distinguish a NULL value from a missing column value):

    "v1": 1,
     "v2": null,
    -"v3": null

    Scylla’s CDC also supports recording pre-images and post-images with every operation (at an additional cost). We plan to add support for them in the future versions of the Scylla CDC Source Connector.

    Horizontal scaling

Even at the proof-of-concept stage, great performance was a paramount requirement. Scylla clusters can scale to hundreds of nodes and petabytes of data, so it became clear that a single Kafka Connect worker node (even a multithreaded one) could not handle the load of a big Scylla cluster.

Thankfully, we took that into consideration while implementing CDC functionality in Scylla. Generally, you can think of Change Data Capture as a time-ordered queue of changes. To allow for horizontal scaling, Scylla maintains a set of multiple time-ordered queues of changes, called streams. When there is only a single consumer of the CDC log, it has to query all streams to properly read all changes. A benefit of this design is that you can introduce additional consumers, assigning a disjoint set of streams to each one of them. As a result, you can greatly increase the parallelism of processing the CDC log.

    That’s the approach we implemented in the Scylla CDC Source Connector. When starting, the connector first reads the identifiers of all available streams. Next, it distributes them among many Kafka Connect tasks (configurable by tasks.max).

Each created Kafka Connect task (which can run on a separate Kafka Connect node) reads CDC changes from its assigned set of streams. If you double the number of tasks, each task has to read only half the number of streams - and thus only half the data throughput - making it possible to handle a higher load.

Solving the large stream count problem

    While designing CDC functionality in Scylla, we had to carefully pick the number of streams that would be created. If we chose too few streams, a consumer could possibly not keep up with the data throughput of a single stream. That could also slow down INSERT, UPDATE, DELETE operations, because many concurrent operations would fight for access to a single stream. However, if Scylla created too many streams, the consumers would have to issue a large number of queries to Scylla (to cover each stream), causing unnecessary load.

The current implementation of CDC in Scylla creates number_of_nodes * number_of_vnodes_per_node * number_of_shards streams per cluster. The number of VNodes comes from the fact that Scylla uses a ring architecture, with 256 VNodes per node by default. Each Scylla node consists of several independent shards, which contain their share of the node’s total data. Typically, there is one shard per hyperthread or physical core.

    For example, if you create a 4-node i3.metal (72 vCPU per node) Scylla cluster, which is capable of roughly 600k operations per second (half INSERTs, half SELECTs), that would be: 4 * 256 * 72 = 73728 streams.

    We quickly realised that this many streams could be a problem in bigger clusters:

1. Too many queries to Scylla - one query per stream

2. Too many Kafka Connect offsets - one offset per stream. Storing offsets means the connector can resume from the last saved position after a crash.

    To mitigate those problems, we made a decision to group streams on the client side. We chose to group the streams by VNode. This reduced the count from number_of_nodes * number_of_vnodes_per_node * number_of_shards to number_of_nodes * number_of_vnodes_per_node. In the case of 4-node i3.metal that means a reduction from 73728 to 1024: only 1024 queries to Scylla and 1024 offsets stored on Kafka.

However, we were still uneasy about the number of offsets to be stored in Kafka. When we looked into other connectors, most of them stored only a single offset, or at most tens of offsets, per replicated table (and as a result had limited scalability).

To understand why storing thousands of offsets on Kafka Connect could be a problem, let’s look at how it works under the hood. Each Kafka Connect record created by a source connector carries a key/value offset, for example: key - my_table, offset - 25, which could represent that the connector has finished reading 25 rows of my_table. Periodically (as configured by offset.flush.interval.ms), those offsets are flushed to a Kafka topic called connect-offsets, as regular Kafka messages.
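
To make that concrete, the messages on connect-offsets roughly look as follows: the key identifies the connector and its source partition, and the value holds the offset within that partition. The exact field names inside the partition and offset maps are connector-specific, so the ones below are illustrative only:

key:   ["my-connector", {"table": "my_table"}]
value: {"row": 25}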

    Unfortunately, Kafka is not a key/value store. When a connector starts up, it must scan all messages on the connect-offsets topic to find the one it needs. When it updates a previously saved offset, it just appends the new value to this topic without deleting the previous entry. It’s not a problem with connectors that have only a single offset - when updated every minute, this topic would hold roughly 10,000 messages after a week. However, in the case of the Scylla CDC Source Connector this number could be several orders of magnitude larger!

Fortunately, this issue can be easily mitigated by setting a more aggressive compaction configuration on the connect-offsets topic. With the default configuration of retention.ms of 7 days and segment.bytes of 1GB, this topic could grow to several hundred megabytes after just a few hours (with a Scylla cluster of tens of nodes and a very small offset.flush.interval.ms). This made connector startup slower, as it had to scan the entire offset topic after a start/restart. By tuning segment.bytes, segment.ms, cleanup.policy, and retention.ms we were able to mitigate the problem and significantly reduce the connect-offsets topic size. The first two options control how often segments are rolled, and thus how soon the log compaction process can act on them. When a segment is compacted, all messages with the same key are reduced to the latest one (the latest offset). Alternatively, setting a shorter retention time (but one that is larger than Scylla’s CDC retention time) proved to be a good option to reduce the offset topic size.
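
One possible way to apply such settings is the kafka-configs tool; the values here are illustrative, and connect-offsets is normally created with cleanup.policy=compact already:

bin/kafka-configs.sh --bootstrap-server localhost:9092 \
  --entity-type topics --entity-name connect-offsets \
  --alter --add-config segment.ms=3600000,segment.bytes=104857600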

    Benchmarks: near linear scaling

    To verify that our connector can actually scale horizontally, we performed a benchmark to measure the maximum throughput of Scylla CDC Source Connector on increasingly larger Kafka Connect clusters.

    First, we started a single-node i3.4xlarge Scylla cluster (based on the official Scylla AMI). Next, we inserted 50 million rows (total size 5.33GB) to a CDC-enabled table. Later, we started an Apache Kafka 2.6.0 cluster and Kafka Connect cluster on either 1, 3 or 5 nodes (r5n.2xlarge). We started the Scylla CDC Source Connector to consume data from the previously populated CDC-enabled table and measured the time it took to produce all 50 million Kafka messages.

    Our connector was able to scale the throughput near linearly:

Kafka cluster size   Throughput   Speedup
1 node               46k/s        1x
3 nodes              129k/s       2.8x
5 nodes              215k/s       4.7x

    Conclusion

In this blog post, we took a deep dive into the development of the Scylla CDC Source Connector. We started with an overview of the CDC implementation in Scylla. We discussed the reasons we chose Debezium rather than the plain Kafka Connect API to build our connector, which makes it familiar to users and Kafka-idiomatic. Next, we looked at two problems we encountered: how to represent Scylla changes and how to make the connector scalable.

    We are very excited to continue improving our connector even further with additional features and making it even more performant. We are eagerly looking forward to watching the Debezium ecosystem grow and integrating functionalities introduced in the latest versions of Debezium.

    If you want to check out the connector yourself, the GitHub repository with its source code is available here: github.com/scylladb/scylla-cdc-source-connector. You can learn more about Scylla here: scylladb.com.

    Piotr Grabowski

    Piotr is a software engineer working at ScyllaDB. From a young age, he participated in many competitive programming contests. At ScyllaDB, Piotr works on Kafka connectors and Scylla Java Driver.

     


Debezium 1.7.0.CR2 Released

    We are very happy to announce the release of Debezium 1.7.0.CR2!

As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

Kafka-based signalling

Recent releases provided a new feature for MySQL - incremental snapshotting from a read-only database. The snapshot process is based on GTIDs and does not need to write to a signalling table. The problem was that triggering the process still required the signalling table to exist and to be writable by the connector.

Now it is possible to send the signal via a Kafka topic. This feature is available when the MySQL connector is configured with read-only = true. Please refer to the documentation for more details.
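
A minimal sketch of sending such a signal, assuming the connector has been pointed at a signal topic named dbz-signals via signal.kafka.topic and its logical server name (database.server.name) is dbserver1; the message key must match that server name, and the value is an execute-snapshot payload (topic and table names are illustrative):

echo 'dbserver1:{"type":"execute-snapshot","data":{"data-collections":["inventory.orders"]}}' | \
  bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic dbz-signals \
  --property parse.key=true --property key.separator=: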

    Performance improvements

Naveen Kumar identified multiple performance issues in Debezium’s core critical path. He benchmarked them and provided pull requests solving them. If you are interested in the details, please check DBZ-4015 and DBZ-3887.

    Further Fixes

    As we’re approaching the 1.7 Final release, most changes have been centered around bug fixing and maturing the codebase. Some of the resolved issues include:

    • Connection failure after snapshot wasn’t executed for a while (DBZ-3951)

    • Incorrect incremental snapshot DDL triggers snapshot that generates unending inserts against signalling table (DBZ-4013)

    • Several fixes to DML and DDL parsing for Oracle (DBZ-3917, DBZ-4017)

    Altogether, 14 issues have been fixed for this release. A big thank you to all contributors: Anisha Mohanty, Bob Roldan, Chris Cranford, Gunnar Morling, Jiri Novotny, Jiri Pechanec, Katerina Galieva, Naveen Kumar KR, Robert Roldan, Vivek Wassan, and Xiao Fu.

    We are on a good path towards 1.7.0.Final by the end of the next week.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


Debezium 1.7.0.Final Released

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Also, some exciting things happened in the wider Debezium community over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    Incremental Snapshotting Improvements

Introduced in Debezium 1.6 and based on a paper published by Netflix Engineering, incremental snapshotting addresses many long-standing feature requests around initial snapshots, such as the ability to re-snapshot specific tables, support for modifications to the include/exclude filter configuration, and resumability of snapshots after a connector restart.

    For Debezium 1.7, incremental snapshotting has been further improved and stabilized. The Debezium MySQL connector now allows incremental snapshotting for databases without write access by the connector, which is very useful when pointing Debezium to read-only replicas. Ad-hoc snapshots can now not only be triggered via the signal table as before, but also by sending a message to a specific Kafka topic, again strengthening the support for read-only scenarios. A big thank you to Kate Galieva of Shopify Engineering for these contributions!
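
For example, with a signal table set up as described in the documentation, an ad-hoc incremental snapshot of a single table could be triggered with an insert roughly like this (the signal table and captured table names are placeholders):

INSERT INTO debezium_signal (id, type, data)
VALUES ('ad-hoc-1', 'execute-snapshot', '{"data-collections": ["inventory.orders"]}');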

Incremental snapshotting is now also supported by the Debezium connector for Oracle. Another snapshotting improvement relates to non-incremental snapshots: filtered columns are now excluded from the snapshot SELECT statements right away, which improves the connector’s performance, for instance when excluding large BLOB columns.

    We’ll follow up with a more detailed blog post around incremental snapshotting shortly.

    Debezium UI

    Debezium UI is part of our efforts to further simplify the experience of getting started with and operating Debezium. The UI lets you configure and start new connectors, examine the state of running connectors, and more.

    The Debezium UI team has been working tirelessly to build out this web app, with support for setting up transformations (SMTs) and topic auto creation settings coming up shortly. In the meantime please take a look at the blog post initially announcing the UI to learn more about it.

    Further Improvements

Other improvements in Debezium 1.7 include support for NATS Streaming in Debezium Server, as well as support for Apache Kafka 2.8 in the Debezium container images. You can even use the Debezium container image for Apache Kafka to get your feet wet with running Apache Kafka without ZooKeeper!

There’s support for MySQL INVISIBLE columns, an off-heap implementation of the transaction buffer of the Debezium connector for Oracle (allowing it to process large, long-running transactions), and much more. Several very nice performance improvements have also been made; a shout-out to Naveen Kumar for his continued help here, including the creation of several JMH benchmarks for measuring the impact of improvements to specific performance-sensitive areas of the code base.

    Altogether, 206 issues have been fixed for the 1.7 final and preview releases. You can find out more in the original announcement posts for Debezium 1.7.0.Alpha1, 1.7.0.Beta1, 1.7.0.CR1, and 1.7.0.CR2. Please refer to the release notes of Debezium 1.7.0.Final for the list of issues resolved since CR2 as well as procedures for upgrading from earlier versions.

The Debezium project couldn’t exist without its amazing community of contributors from different countries all around the world! A big thank you to everyone contributing to this release in one way or another! Kudos to the following individuals from the community who contributed to the Debezium core repository in 1.7:

    Outlook

The next Debezium release, 1.8, is planned for the end of the year. The roadmap is still in flux, but some of the features we plan to address are support for MongoDB change streams (so as to support MongoDB 5.0), improved support for MariaDB, and the ability to compact large database history topics.

We’re also planning to further build out the Debezium UI, continue the work on the Debezium connector for Oracle, make the SQL Server connector capable of dealing with multiple databases at once, and much more. Please let us know about your feature requests via the mailing list!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


--from-beginning \
--property print.key=true \
--topic dbserver1.inventory.orders

    If you were to modify any record in the orders table while the snapshot is running, this would be either emitted as a read event or as an update event, depending on the exact timing and sequence of things.

    As the last step, let’s terminate the deployed systems and close all terminals:

# Shut down the cluster
docker-compose -f docker-compose-postgres.yaml down

    Summary

In this blog post, we have discussed the motivation for incremental snapshotting, as introduced by the DBLog paper. We reviewed the methods used in the past to achieve the described functionality. Then we dived into the deep waters of the implementation of this novel snapshotting approach in Debezium, and in the end we tried it out live.

We hope you will find incremental snapshotting useful and we look forward to your feedback, experiences, and use cases. In a future blog post, we’ll talk about the support for incremental snapshots of read-only databases (supported by the Debezium MySQL connector as of version 1.7) and how to trigger ad-hoc snapshots using a Kafka topic as the means of signalling instead of a database table.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


UPDATE prod.db.all_events
SET session_time = 0, ignored = true
WHERE session_time < (SELECT min(session_time) FROM prod.db.good_events);

    Wrap-Up and Contributions

Based on Debezium and Apache Iceberg, Debezium Server Iceberg makes it very simple to set up a low-latency data ingestion pipeline for your data lake. The project is completely open-source, using the Apache 2.0 license. Debezium Server Iceberg is still a young project and there are things to improve. Please feel free to test it, give feedback, open feature requests or send pull requests. You can see more examples and start experimenting with Iceberg and Spark using this project.

    Ismail Simsek

Ismail is a Senior Data Engineer and has been working in the data analytics field for more than 10 years. He is interested in realtime data and machine learning applications. He lives in Munich, Germany.

       


"ts_ms": 1635292878244,
"transaction": null
}
}

This capture mode is nearly identical to the change_streams mode, except for one critical difference: the after field is populated with a complete snapshot of the document. This mode is great for consumers that rely on having all fields of the source document.

    Please see the MongoDB documentation for more details on full document mode semantics.

    The full document mode is based on a re-selection of the source document when MongoDB provides the change event over the change stream to the connector. In cases where multiple changes to the same document happen within close proximity of one another, each event may have the same full document representation.
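
Assuming the option names of the MongoDB connector stay as currently proposed, choosing between the two behaviours comes down to a single setting; the values below are illustrative:

# update events do not carry the full document
capture.mode=change_streams

# update events additionally carry the full document in the "after" field
capture.mode=change_streams_update_full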

    Future work

In conjunction with the work already done on MongoDB change streams, we recognize there is much work remaining, which includes testing the new change streams implementation against MongoDB 5 and updating the connector documentation to reflect these new changes. You can expect this and much more as part of the next preview release. As per the updated Debezium 1.8 roadmap, we’re also planning to add support for incremental snapshots to the Debezium connector for MongoDB, as well as a variant of the outbox event router which supports the event format of this connector.

    Other Fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement (DBZ-3834)

    • The chunk select statement is incorrect for combined primary key in incremental snapshot (DBZ-3860)

• column.mask.hash.hashAlgorithm.with… - data corruption occurs when using this feature (DBZ-4033)

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database (DBZ-4064)

    Altogether, 82 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Christopher Burch, Claus Guttesen, Fabian Martinez, Guy Korland, Harvey Yue, Hossein Torabi, Juan Fiallo, Judah Rand, Laurent Broudoux, Plugaru Tudor, Sergei Morozov, Sungho Hwang, Ünal Sürmeli, Vivek Wassan, Willie Zhu, Zongwen Li, and 陆杰.

    Outlook

    As the end of the year is just around the corner, we intend to press forward with the same vigor. We have started an open discussion regarding Debezium 2.0 on the mailing list. Your feedback is invaluable so let us know what you’d like to see added, changed, or improved! In the meantime, you can also expect a minor bugfix release to the Debezium 1.7 series next week, as well as another preview release of the Debezium 1.8 series in a couple more weeks. Happy Streaming!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve ours existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    This capture mode is nearly identical to the change_streams mode except with one critical difference, the after field is populated with a complete snapshot of document. This mode is great for consumers that rely on having all fields in the source document.

    Please see the MongoDB documentation for more details on full document mode semantics.

    The full document mode is based on a re-selection of the source document when MongoDB provides the change event over the change stream to the connector. In cases where multiple changes to the same document happen within close proximity of one another, each event may have the same full document representation.

    Future work

    In conjunction to the work already done with MongoDB change streams, we recognize there is much work that remains which includes testing the new change streams implementations against MongoDB 5 and updating the connector documentation to reflect these new changes. You can expect this and much more as a part of the next preview release. As per the updated Debezium 1.8 roadmap, we’re also planning to add support for incremental snapshots to the Debezium connector for MongoDB, as well as a variant of the outbox event router which supports the event format of this connector.

    Other Fixes

    There were quite a number of bugfixes and stability changes in this release, some noteworthy are:

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement (DBZ-3834)

    • The chunk select statement is incorrect for combined primary key in incremental snapshot (DBZ-3860)

    • column.the mask.hash.hashAlgorithm.with…​. data corruption occurs when using this feature (DBZ-4033)

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database (DBZ-4064)

    Altogether, 82 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Christopher Burch, Claus Guttesen, Fabian Martinez, Guy Korland, Harvey Yue, Hossein Torabi, Juan Fiallo, Judah Rand, Laurent Broudoux, Plugaru Tudor, Sergei Morozov, Sungho Hwang, Ünal Sürmeli, Vivek Wassan, Willie Zhu, Zongwen Li, and 陆杰.

    Outlook

    As the end of the year is just around the corner, we intend to press forward with the same vigor. We have started an open discussion regarding Debezium 2.0 on the mailing list. Your feedback is invaluable so let us know what you’d like to see added, changed, or improved! In the meantime, you can also expect a minor bugfix release to the Debezium 1.7 series next week, as well as another preview release of the Debezium 1.8 series in a couple more weeks. Happy Streaming!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/11/11/debezium-1.8-alpha2-released/index.html b/blog/2021/11/11/debezium-1.8-alpha2-released/index.html index 47232719ed..d51bb60937 100644 --- a/blog/2021/11/11/debezium-1.8-alpha2-released/index.html +++ b/blog/2021/11/11/debezium-1.8-alpha2-released/index.html @@ -1,2 +1,2 @@ Debezium 1.8.0.Alpha2 Released

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor improvements, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    This release does include several breaking changes. Please see the release notes for details on what changed and how to upgrade.

    MySQL heartbeat action query support

    A heartbeat action query can be enabled by supplying a heartbeat.action.query configuration option in the connector’s configuration. This property is meant to supply a SQL statement that the connector will execute periodically.

The initial implementation of the heartbeat action query was specifically for PostgreSQL, to deal with WAL growth under specific conditions. But a heartbeat action query has many uses and is entirely connector- or even user-driven.

For example, you may want to notify downstream consumers that your MySQL topology has changed by supplying them with an event carrying the GTID. The following configuration shows how to capture changes to the heartbeat action query table so that they can then be consumed easily by your CDC pipeline.

    "table.include.list": "gtid_history",
    -"heartbeat.action.query": "INSERT INTO gtid_history( select * from mysql.gtid_executed )"

    Configurable transaction topic names

    The Debezium transaction metadata topic had previously used a relatively non-configurable naming convention of <database.server.name>.transaction. While it was possible to manipulate the topic name using a single message transform (SMT) as a workaround, we felt that allowing this to be a bit more flexible in Debezium proper made sense.

A new configuration option, transaction.topic.prefix, has been introduced that allows the connector configuration to adjust the naming of the transaction metadata topic. The configuration option value specifies what will be used as a direct replacement for the <database.server.name> portion of the topic name. If this configuration option is not supplied, the prior topic naming behavior will continue to be used, requiring no changes for existing connector deployments.
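As a sketch of how this might be used (the prefix value txmeta is illustrative, and provide.transaction.metadata is the existing option that enables transaction metadata events in the first place):

"provide.transaction.metadata": "true",
"transaction.topic.prefix": "txmeta"

With database.server.name set to dbserver1, transaction metadata events would then be written to txmeta.transaction rather than dbserver1.transaction.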

    Other Fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Invalid default value error on captured table DDL with default value DBZ-3710

    • Incremental snapshot doesn’t work without primary key DBZ-4107

    • Signal based incremental snapshot is failing if database name contains dash DBZ-4244

    Altogether, 45 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Abhishek Hodavdekar, Alexander Schwartz, Andrey Yegorov, Harvey Yue, Hussain Ansari, Jiabao Sun, Jiri Novotny, Jose Luis, Juan Fiallo, Katerina Galieva, Mike Kamornikov, and Sergei Morozov!

    Debezium 1.7

    In addition to this release, we also released Debezium 1.7.1.Final, a bugfix update for the 1.7 series. The 1.7.1.Final release includes many of the bugfixes in the 1.8 series that have been done since 1.7.0.Final. For more information on what changed in 1.7.1.Final, please see the release notes.

    Outlook

    The holiday season is upon us, but we intend to stick to our release cadence as closely as possible. If you haven’t already taken an opportunity, we’d love your feedback on the open discussion regarding Debezium 2.0 on the mailing list. In the meantime, you can expect the first beta release of 1.8 in a couple of weeks.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +"heartbeat.action.query": "INSERT INTO gtid_history( select * from mysql.gtid_executed )"

    Configurable transaction topic names

    The Debezium transaction metadata topic had previously used a relatively non-configurable naming convention of <database.server.name>.transaction. While it was possible to manipulate the topic name using a single message transform (SMT) as a workaround, we felt that allowing this to be a bit more flexible in Debezium proper made sense.

A new configuration option, transaction.topic.prefix, has been introduced that allows the connector configuration to adjust the naming of the transaction metadata topic. The configuration option value specifies what will be used as a direct replacement for the <database.server.name> portion of the topic name. If this configuration option is not supplied, the prior topic naming behavior will continue to be used, requiring no changes for existing connector deployments.

    Other Fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Invalid default value error on captured table DDL with default value DBZ-3710

    • Incremental snapshot doesn’t work without primary key DBZ-4107

    • Signal based incremental snapshot is failing if database name contains dash DBZ-4244

    Altogether, 45 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Abhishek Hodavdekar, Alexander Schwartz, Andrey Yegorov, Harvey Yue, Hussain Ansari, Jiabao Sun, Jiri Novotny, Jose Luis, Juan Fiallo, Katerina Galieva, Mike Kamornikov, and Sergei Morozov!

    Debezium 1.7

    In addition to this release, we also released Debezium 1.7.1.Final, a bugfix update for the 1.7 series. The 1.7.1.Final release includes many of the bugfixes in the 1.8 series that have been done since 1.7.0.Final. For more information on what changed in 1.7.1.Final, please see the release notes.

    Outlook

    The holiday season is upon us, but we intend to stick to our release cadence as closely as possible. If you haven’t already taken an opportunity, we’d love your feedback on the open discussion regarding Debezium 2.0 on the mailing list. In the meantime, you can expect the first beta release of 1.8 in a couple of weeks.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/11/23/debezium-ui-transforms/index.html b/blog/2021/11/23/debezium-ui-transforms/index.html index 3805104314..3e02ebca4b 100644 --- a/blog/2021/11/23/debezium-ui-transforms/index.html +++ b/blog/2021/11/23/debezium-ui-transforms/index.html @@ -1 +1 @@ - Debezium UI support for Single Message Transformations

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    Single Message Transformations (SMTs)

    Connectors can be configured with transformations to make lightweight per message modifications. Common SMT use cases include format conversions (e.g. different date formats and number types), message filtering and routing, handling of "tombstone" events, encryption/decryption, etc.

Debezium provides several single message transformations (SMTs) that you can use to either modify records before they are sent to Apache Kafka (by applying them to the Debezium connectors), or when they are read from Kafka by a sink connector. For instance, we provide SMTs for extracting only the "after" part from change events and propagating it in a flat row format, as well as SMTs for routing the events from an outbox table.
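As an illustration of the first case, wiring the "after"-state extraction into a connector configuration typically looks something like this (the transform alias unwrap is just a label chosen for this sketch):

"transforms": "unwrap",
"transforms.unwrap.type": "io.debezium.transforms.ExtractNewRecordState"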

To learn more about the SMTs coming with Debezium, please refer to the reference documentation. And thanks to the support for SMTs in the Debezium UI, setting them up is easier than ever. For a short demo of this feature in action, see the following video:

    Fun fact: this video is the very first entry to our brand-new Debezium YouTube channel! We recommend you subscribe to the channel to never miss any new videos.

    Trying It Out Yourself

    We have created a self-contained example UI demo, which is included under debezium-examples on Github. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.

    To learn more about the Debezium UI, please refer to the reference documentation.

    Next Steps

    We plan to continue with improvements and new features for the UI in the coming releases. Some items under consideration:

    • Incorporation of more Debezium connector types, such as the ones for SQL Server and Oracle

    • Addition and improvement of connector metrics and monitoring

    • Add capability for viewing and editing connector properties after creation

    • …​And more!

    We’d also be very happy to learn about your requirements and feedback on the Debezium UI. Please let us know in the comments below, or send a message to our mailing list.

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium UI support for Single Message Transformations

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    Single Message Transformations (SMTs)

    Connectors can be configured with transformations to make lightweight per message modifications. Common SMT use cases include format conversions (e.g. different date formats and number types), message filtering and routing, handling of "tombstone" events, encryption/decryption, etc.

Debezium provides several single message transformations (SMTs) that you can use to either modify records before they are sent to Apache Kafka (by applying them to the Debezium connectors), or when they are read from Kafka by a sink connector. For instance, we provide SMTs for extracting only the "after" part from change events and propagating it in a flat row format, as well as SMTs for routing the events from an outbox table.

To learn more about the SMTs coming with Debezium, please refer to the reference documentation. And thanks to the support for SMTs in the Debezium UI, setting them up is easier than ever. For a short demo of this feature in action, see the following video:

    Fun fact: this video is the very first entry to our brand-new Debezium YouTube channel! We recommend you subscribe to the channel to never miss any new videos.

    Trying It Out Yourself

    We have created a self-contained example UI demo, which is included under debezium-examples on Github. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.

    To learn more about the Debezium UI, please refer to the reference documentation.

    Next Steps

    We plan to continue with improvements and new features for the UI in the coming releases. Some items under consideration:

    • Incorporation of more Debezium connector types, such as the ones for SQL Server and Oracle

    • Addition and improvement of connector metrics and monitoring

    • Add capability for viewing and editing connector properties after creation

    • …​And more!

    We’d also be very happy to learn about your requirements and feedback on the Debezium UI. Please let us know in the comments below, or send a message to our mailing list.

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/11/30/debezium-1.8-beta1-released/index.html b/blog/2021/11/30/debezium-1.8-beta1-released/index.html index c8e055fc11..fc985f809a 100644 --- a/blog/2021/11/30/debezium-1.8-beta1-released/index.html +++ b/blog/2021/11/30/debezium-1.8-beta1-released/index.html @@ -48,4 +48,4 @@ "prefix": "some-prefix", "content": "c29tZSB0ZXh0" } -}

The message content is an arbitrary payload; besides the textual representation, you can also insert binary data here. It is the responsibility of the event producer to document the format, evolve it with backwards compatibility in mind, and exchange schema information with any clients. One great way of doing so would be to take advantage of a schema registry such as Apicurio. You also could think of using a standard like CloudEvents for your logical decoding messages, which then for instance would allow an SMT such as the aforementioned outbox event router to take action based on defined attributes in the event structure.
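For reference, the prefix/content fragment of the event value shown above pairs the message prefix chosen by the producer with the Base64-encoded payload; the value c29tZSB0ZXh0 decodes to the string "some text":

"prefix": "some-prefix",
"content": "c29tZSB0ZXh0"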

    To learn more about support for logical decoding messages in Debezium, please refer to the connector documentation. Thanks a lot to Lairen Hightower for implementing this feature!

    Other Fixes and Changes

    Further fixes and improvements in the 1.8.0.Beta1 release include:

    • Support for configuring SMTs and topic creation settings in the Debezium UI; you can see the former in a quick video in this post, and we’ll share another demo of the topic creation UI later this week

• Transaction metadata events in the Vitess connector (DBZ-4355); we also simplified its configuration by removing the dependency on vtctld (DBZ-4324), added support for the stop_on_reshard flag (DBZ-4295), and provided the ability to specify a VGTID as the starting point for streaming (DBZ-4297). All these changes were contributed by Yang Wu and Shichao from the Stripe engineering team, who agreed to step up as maintainers of this connector. Thanks a lot, and welcome!

    • More flexible configuration of the Infinispan-based transaction buffer of the Debezium connector for Oracle (DBZ-4169)

    • Improved type mappings for MONEY columns in Postgres (DBZ-1931) and INTERVAL columns in Oracle (DBZ-1539)

    • Support for schema changes while doing an incremental snapshot with the Debezium connector for MySQL (DBZ-4196); thanks to Kate Galieva for this very useful improvement!

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

    With the Beta1 release out, we’re approaching the final phase of the 1.8 release cycle. You can expect a CR1 sometime next week, and depending on incoming issue reports, we may decide to cut the Final release either in the week before Christmas, or in the first week of 2022. In terms of features to be added, one thing we’d love to get to is incremental snapshotting support for the MongoDB connector. We’ll have to see whether this will make it in the remaining time, or whether this will have to wait for the Debezium 1.9 release. While the 1.8 release line is maturing, you also can look forward to the release of Debezium 1.7.2.

    Going forward, we’re also continuing our planning around Debezium 2.0, which should be released sometime next year. Please join the discussion on this topic on the mailing list.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

The message content is an arbitrary payload; besides the textual representation, you can also insert binary data here. It is the responsibility of the event producer to document the format, evolve it with backwards compatibility in mind, and exchange schema information with any clients. One great way of doing so would be to take advantage of a schema registry such as Apicurio. You also could think of using a standard like CloudEvents for your logical decoding messages, which then for instance would allow an SMT such as the aforementioned outbox event router to take action based on defined attributes in the event structure.

    To learn more about support for logical decoding messages in Debezium, please refer to the connector documentation. Thanks a lot to Lairen Hightower for implementing this feature!

    Other Fixes and Changes

    Further fixes and improvements in the 1.8.0.Beta1 release include:

    • Support for configuring SMTs and topic creation settings in the Debezium UI; you can see the former in a quick video in this post, and we’ll share another demo of the topic creation UI later this week

• Transaction metadata events in the Vitess connector (DBZ-4355); we also simplified its configuration by removing the dependency on vtctld (DBZ-4324), added support for the stop_on_reshard flag (DBZ-4295), and provided the ability to specify a VGTID as the starting point for streaming (DBZ-4297). All these changes were contributed by Yang Wu and Shichao from the Stripe engineering team, who agreed to step up as maintainers of this connector. Thanks a lot, and welcome!

    • More flexible configuration of the Infinispan-based transaction buffer of the Debezium connector for Oracle (DBZ-4169)

    • Improved type mappings for MONEY columns in Postgres (DBZ-1931) and INTERVAL columns in Oracle (DBZ-1539)

    • Support for schema changes while doing an incremental snapshot with the Debezium connector for MySQL (DBZ-4196); thanks to Kate Galieva for this very useful improvement!

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

    With the Beta1 release out, we’re approaching the final phase of the 1.8 release cycle. You can expect a CR1 sometime next week, and depending on incoming issue reports, we may decide to cut the Final release either in the week before Christmas, or in the first week of 2022. In terms of features to be added, one thing we’d love to get to is incremental snapshotting support for the MongoDB connector. We’ll have to see whether this will make it in the remaining time, or whether this will have to wait for the Debezium 1.9 release. While the 1.8 release line is maturing, you also can look forward to the release of Debezium 1.7.2.

    Going forward, we’re also continuing our planning around Debezium 2.0, which should be released sometime next year. Please join the discussion on this topic on the mailing list.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/12/02/debezium-ui-topic-groups/index.html b/blog/2021/12/02/debezium-ui-topic-groups/index.html index 9a288404e6..e3548a308a 100644 --- a/blog/2021/12/02/debezium-ui-topic-groups/index.html +++ b/blog/2021/12/02/debezium-ui-topic-groups/index.html @@ -1 +1 @@ - Configuring Automatic Topic Creation With the Debezium UI

The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    Topic Auto-creation

    When you start a Debezium connector, the topics for the captured events are created by the Kafka broker based on a default, possibly customized, broker configuration (if auto.create.topics.enable = true). But often when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto creation capability (auto.create.topics.enable = false), or you want the connector topics to be configured differently from the default. In this case you’ll need to create topics for Debezium’s captured data sources upfront.

Beginning with Kafka 2.6.0, Kafka Connect provides a means of customizing the settings of specific topics created by source connectors such as Debezium (KIP-158). If Kafka Connect topic creation is enabled (topic.creation.enable = true), the Debezium UI now allows you to configure connector topics using the UI.

    Kafka Connect Topic Creation

    Kafka Connect topic creation works with groups. There is a default group, which is used when there is no other group defined that matches the topic.

You can also define multiple custom topic groups, each with its own configuration. Each group can specify its configuration parameters to customize how the matched topics of the group will be created. The custom groups will fall back to the default group settings for the required replication.factor and partitions properties. If the configuration for a custom topic group leaves other properties undefined, the values specified in the default group are not applied.
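As a rough sketch of how this can look in a connector configuration (with topic.creation.enable = true on the Connect worker; the group name compacted and the topic regular expression are illustrative):

"topic.creation.default.replication.factor": "3",
"topic.creation.default.partitions": "10",
"topic.creation.groups": "compacted",
"topic.creation.compacted.include": "dbserver1\\.inventory\\..*",
"topic.creation.compacted.cleanup.policy": "compact"

Topics matching the include pattern are created with the compacted group’s settings, while the replication factor and partition count still come from the default group unless the custom group overrides them.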

    To find more detail about topic auto-creation with Debezium, please refer to the reference documentation. You can also refer to this blog post for a full example. Watch the following video for a quick demo of topic creation in the Debezium UI:

    Self-contained Example

    You can try out topic auto-creation (and more) with our self-contained example UI demo - which is included under debezium-examples on GitHub. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.

    To learn more about the Debezium UI, please refer to the reference documentation.

    More coming soon!

Stay tuned for further improvements and new features in the UI in the coming releases. Support for SQL Server and Oracle connectors is coming soon!

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Configuring Automatic Topic Creation With the Debezium UI

The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    Topic Auto-creation

    When you start a Debezium connector, the topics for the captured events are created by the Kafka broker based on a default, possibly customized, broker configuration (if auto.create.topics.enable = true). But often when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto creation capability (auto.create.topics.enable = false), or you want the connector topics to be configured differently from the default. In this case you’ll need to create topics for Debezium’s captured data sources upfront.

Beginning with Kafka 2.6.0, Kafka Connect provides a means of customizing the settings of specific topics created by source connectors such as Debezium (KIP-158). If Kafka Connect topic creation is enabled (topic.creation.enable = true), the Debezium UI now allows you to configure connector topics using the UI.

    Kafka Connect Topic Creation

    Kafka Connect topic creation works with groups. There is a default group, which is used when there is no other group defined that matches the topic.

You can also define multiple custom topic groups, each with its own configuration. Each group can specify its configuration parameters to customize how the matched topics of the group will be created. The custom groups will fall back to the default group settings for the required replication.factor and partitions properties. If the configuration for a custom topic group leaves other properties undefined, the values specified in the default group are not applied.

    To find more detail about topic auto-creation with Debezium, please refer to the reference documentation. You can also refer to this blog post for a full example. Watch the following video for a quick demo of topic creation in the Debezium UI:

    Self-contained Example

    You can try out topic auto-creation (and more) with our self-contained example UI demo - which is included under debezium-examples on GitHub. The UI demo includes a Docker Compose file which brings up several sources with data as well as the UI. Please refer to the README file for more details on running the Debezium UI demo.

    To learn more about the Debezium UI, please refer to the reference documentation.

    More coming soon!

Stay tuned for further improvements and new features in the UI in the coming releases. Support for SQL Server and Oracle connectors is coming soon!

    A big thank you to the team who have contributed in many ways: Ashique Ansari, Indra Shukla, René Kerner and Gunnar Morling!

    Mark Drilling

    Mark is a software developer at Red Hat. He has spent most of his career at Red Hat working on various Data Virtualization projects, and now works on Debezium. He lives in O'Fallon, Missouri USA.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/12/09/debezium-1.8-cr1-released/index.html b/blog/2021/12/09/debezium-1.8-cr1-released/index.html index 26cf0309bc..71de7657e2 100644 --- a/blog/2021/12/09/debezium-1.8-cr1-released/index.html +++ b/blog/2021/12/09/debezium-1.8-cr1-released/index.html @@ -1 +1 @@ - Debezium 1.8.0.CR1 Released

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    MongoDB incremental snapshots

Incremental snapshotting is a feature that we first introduced as a part of Debezium 1.6 nearly six months ago. The goal of incremental snapshots is primarily to address two very common user pain-points:

    • the necessity to execute consistent snapshots before streaming can begin upon connector restart

• inability to trigger full or partial snapshots after the connector has begun streaming

    The implementation of this feature is based on a novel approach to snapshotting originally invented by the DBLog Framework from Netflix. Debezium’s implementation is described in the design document, and we also published an in-depth blog post discussing our implementation in greater detail.
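For readers new to the feature: an incremental snapshot is triggered by sending a signal to a running connector. The logical content of such an execute-snapshot signal looks roughly like the following (the collection name is illustrative; the exact signaling channel, a signal table for the relational connectors or a signaling collection for MongoDB, is described in the documentation):

{
  "type": "execute-snapshot",
  "data": {
    "data-collections": ["inventory.customers"]
  }
}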

    With this release, we’re excited to finally debut this feature for MongoDB. All Debezium core connectors now support this feature; an amazing milestone!

    I’d like to thank our very own Jiri Pechanec and Kate Galieva from Shopify for their amazing efforts these last few months at refining and delivering on this feature as well as the entire community for testing and offering solid feedback.

    Further fixes

With the 1.8 Final release scheduled for next week, the vast majority of the changes in this release focus on stability and bugfixes. Some resolved issues include:

    • [Oracle] None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) (DBZ-3635)

    • [Oracle] Add support for truncate in Oracle connector (DBZ-4385)

    • [Oracle] Support binary_handling_mode for Oracle connector (DBZ-4400)

    • [Debezium Server] Event Hubs exporter slow/Event data was too large (DBZ-4277)

    • [Vitess] Enforce consistent vgtid representation in Vitess connector (DBZ-4409)

    • [Vitess] VStream gRPC connection closed after being idle for a few minutes (DBZ-4389)

    • Several fixes to DML and DDL parsing for MySQL (DBZ-4397, DBZ-4402) and Oracle (DBZ-4388, DBZ-4396)

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

As the year is coming to a close, we’re actively preparing some holiday treats!

You can expect 1.7.2.Final to be released early next week, including many bugfixes and improvements. Additionally, we intend to release 1.8.0.Final in the middle of next week, barring any unforeseen bug reports with CR1.

After the holiday break, we plan to be back in full swing on Debezium 1.9. Keep an eye on our roadmap as we’ll be updating it to include our focus for next quarter’s release cycle.

    We’re also actively working on the planning and scope of Debezium 2.0 which we intend to release sometime in 2022. We would love your feedback on any features or changes you’d like to see so join the discussion on this topic on the mailing list.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 1.8.0.CR1 Released

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    MongoDB incremental snapshots

Incremental snapshotting is a feature that we first introduced as a part of Debezium 1.6 nearly six months ago. The goal of incremental snapshots is primarily to address two very common user pain-points:

    • the necessity to execute consistent snapshots before streaming can begin upon connector restart

• inability to trigger full or partial snapshots after the connector has begun streaming

    The implementation of this feature is based on a novel approach to snapshotting originally invented by the DBLog Framework from Netflix. Debezium’s implementation is described in the design document, and we also published an in-depth blog post discussing our implementation in greater detail.

    With this release, we’re excited to finally debut this feature for MongoDB. All Debezium core connectors now support this feature; an amazing milestone!

    I’d like to thank our very own Jiri Pechanec and Kate Galieva from Shopify for their amazing efforts these last few months at refining and delivering on this feature as well as the entire community for testing and offering solid feedback.

    Further fixes

With the 1.8 Final release scheduled for next week, the vast majority of the changes in this release focus on stability and bugfixes. Some resolved issues include:

    • [Oracle] None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) (DBZ-3635)

    • [Oracle] Add support for truncate in Oracle connector (DBZ-4385)

    • [Oracle] Support binary_handling_mode for Oracle connector (DBZ-4400)

    • [Debezium Server] Event Hubs exporter slow/Event data was too large (DBZ-4277)

    • [Vitess] Enforce consistent vgtid representation in Vitess connector (DBZ-4409)

    • [Vitess] VStream gRPC connection closed after being idle for a few minutes (DBZ-4389)

    • Several fixes to DML and DDL parsing for MySQL (DBZ-4397, DBZ-4402) and Oracle (DBZ-4388, DBZ-4396)

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

As the year is coming to a close, we’re actively preparing some holiday treats!

You can expect 1.7.2.Final to be released early next week, including many bugfixes and improvements. Additionally, we intend to release 1.8.0.Final in the middle of next week, barring any unforeseen bug reports with CR1.

After the holiday break, we plan to be back in full swing on Debezium 1.9. Keep an eye on our roadmap as we’ll be updating it to include our focus for next quarter’s release cycle.

    We’re also actively working on the planning and scope of Debezium 2.0 which we intend to release sometime in 2022. We would love your feedback on any features or changes you’d like to see so join the discussion on this topic on the mailing list.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2021/12/14/note-on-log4j-security/index.html b/blog/2021/12/14/note-on-log4j-security/index.html index 9f0f22ac83..e888ea0a39 100644 --- a/blog/2021/12/14/note-on-log4j-security/index.html +++ b/blog/2021/12/14/note-on-log4j-security/index.html @@ -1 +1 @@ - Note on log4j Security

TL;DR: Debezium is NOT affected by the recently disclosed remote code execution vulnerability in log4j2 (CVE-2021-44228). The log4j-1.2.17.jar shipped in Debezium’s container images contains a class JMSAppender, which is subject to a MODERATE vulnerability (CVE-2021-4104). This appender is NOT used by default, i.e. access to log4j’s configuration is required in order to exploit this CVE. As a measure of caution, we have decided to remove the JMSAppender class from Debezium’s container images as of version 1.7.2.Final, released today.

    On Dec 10th, a remote code execution vulnerability in the widely used log4j2 library was published (CVE-2021-44228). Debezium, just like Apache Kafka and Kafka Connect, does not use log4j2 and therefore is NOT affected by this CVE.

    Apache Kafka, Kafka Connect and Apache ZooKeeper do use log4j 1.x though, which therefore is shipped as part of Debezium’s container images for these components. On Dec 13th, a MODERATE vulnerability in log4j 1.x was published (CVE-2021-4104), affecting the JMSAppender class coming with log4j 1.x. This vulnerability "allows a remote attacker to execute code on the server if the deployed application is configured to use JMSAppender and to the attacker’s JMS Broker".

    This appender is NOT used by default, i.e. "this flaw ONLY affects applications which are specifically configured to use JMSAppender, which is not the default, or when the attacker has write access to the Log4j configuration for adding JMSAppender to the attacker’s JMS Broker". If you are using JMSAppender, you should verify and ensure that you are using trustworthy configuration values for its TopicBindingName and TopicConnectionFactoryBindingName settings.

    Using a JMS-based appender should only very rarely occur in the context of Apache Kafka, if at all. As a measure of caution, we have therefore decided to remove the JMSAppender class from the log4j-1.2.17.jar JAR contained in Debezium’s container images for Apache Kafka, Kafka Connect, and Apache ZooKeeper. At the same time, we are also removing the SocketServer class from the log4j-1.2.17.jar, which is subject to another, unrelated CVE (CVE-2019-17571). This is a separate main class, not used in any way by Debezium, Kafka, Kafka Connect, or ZooKeeper, but we decided to not ship it any longer, thus making the Debezium container images not subject to this CVE either.

Note that if you are running the Debezium connectors via other distributions of Apache Kafka and related components, the JMSAppender and SocketServer classes may be present in their log4j-1.2.17.jar, and you thus should make sure to either not use them at all, or only use them in a safe way. Access to log4j’s configuration should be secured in an appropriate way.

    Other distributables of Debezium, such as the individual connector archives, or the Debezium Server distribution, do not contain log4j-1.2.17.jar and thus are NOT subject to the mentioned CVEs in any way.

The removal of the JMSAppender and SocketServer classes from the log4j-1.2.17.jar shipped with Debezium’s container images is effective as of Debezium 1.7.2.Final, which was released earlier today. We recommend that all users update to this version.

If you have any questions around this topic, please join the discussion on this thread on the Debezium mailing list. If you have any other security-related concerns around Debezium, please do NOT publicly discuss them, but file a Jira issue with limited visibility in our bug tracker, and we will follow up with you on this as quickly as possible.

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Note on log4j Security

TL;DR: Debezium is NOT affected by the recently disclosed remote code execution vulnerability in log4j2 (CVE-2021-44228). The log4j-1.2.17.jar shipped in Debezium’s container images contains a class JMSAppender, which is subject to a MODERATE vulnerability (CVE-2021-4104). This appender is NOT used by default, i.e. access to log4j’s configuration is required in order to exploit this CVE. As a measure of caution, we have decided to remove the JMSAppender class from Debezium’s container images as of version 1.7.2.Final, released today.

    On Dec 10th, a remote code execution vulnerability in the widely used log4j2 library was published (CVE-2021-44228). Debezium, just like Apache Kafka and Kafka Connect, does not use log4j2 and therefore is NOT affected by this CVE.

    Apache Kafka, Kafka Connect and Apache ZooKeeper do use log4j 1.x though, which therefore is shipped as part of Debezium’s container images for these components. On Dec 13th, a MODERATE vulnerability in log4j 1.x was published (CVE-2021-4104), affecting the JMSAppender class coming with log4j 1.x. This vulnerability "allows a remote attacker to execute code on the server if the deployed application is configured to use JMSAppender and to the attacker’s JMS Broker".

    This appender is NOT used by default, i.e. "this flaw ONLY affects applications which are specifically configured to use JMSAppender, which is not the default, or when the attacker has write access to the Log4j configuration for adding JMSAppender to the attacker’s JMS Broker". If you are using JMSAppender, you should verify and ensure that you are using trustworthy configuration values for its TopicBindingName and TopicConnectionFactoryBindingName settings.

    Using a JMS-based appender should only very rarely occur in the context of Apache Kafka, if at all. As a measure of caution, we have therefore decided to remove the JMSAppender class from the log4j-1.2.17.jar JAR contained in Debezium’s container images for Apache Kafka, Kafka Connect, and Apache ZooKeeper. At the same time, we are also removing the SocketServer class from the log4j-1.2.17.jar, which is subject to another, unrelated CVE (CVE-2019-17571). This is a separate main class, not used in any way by Debezium, Kafka, Kafka Connect, or ZooKeeper, but we decided to not ship it any longer, thus making the Debezium container images not subject to this CVE either.

Note that if you are running the Debezium connectors via other distributions of Apache Kafka and related components, the JMSAppender and SocketServer classes may be present in their log4j-1.2.17.jar, and you thus should make sure to either not use them at all, or only use them in a safe way. Access to log4j’s configuration should be secured in an appropriate way.

    Other distributables of Debezium, such as the individual connector archives, or the Debezium Server distribution, do not contain log4j-1.2.17.jar and thus are NOT subject to the mentioned CVEs in any way.

The removal of the JMSAppender and SocketServer classes from the log4j-1.2.17.jar shipped with Debezium’s container images is effective as of Debezium 1.7.2.Final, which was released earlier today. We recommend that all users update to this version.

If you have any questions around this topic, please join the discussion on this thread on the Debezium mailing list. If you have any other security-related concerns around Debezium, please do NOT discuss them publicly, but file a Jira issue with limited visibility in our bug tracker, and we will follow up with you on this as quickly as possible.

    Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast at heart. He has been the project lead of Debezium for many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.8.0.Final Released

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    Improvements to the Debezium Connector for MongoDB

The team has made a strong push to bring multiple new features and improvements to the connector for MongoDB. It now has a brand-new capture implementation based on MongoDB Change Streams, which enables some very exciting new functionality. More specifically, the connector now

• Supports and has been tested with all the latest MongoDB versions, up to 5.0

• Can optionally emit the complete document state for update events (by means of the Change Streams capability of reading back the entire document affected by a change); see the configuration sketch after this list

    • Provides support for incremental snapshots, as already known from the other Debezium connectors (more details on that in a separate blog post)

    • Helps you to implement the outbox pattern for microservices data exchange by means of an event routing SMT, specifically tailored to the event format emitted by this connector
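
As a rough configuration sketch for the full-document-state behaviour mentioned above, the connector’s capture mode selects the Change Streams variant to use, and the _update_full flavour reads back the complete document for updates. The option is shown below as a plain key/value pair; all other connector settings are omitted for brevity.

# Illustrative MongoDB connector option (other required connector settings omitted)
capture.mode=change_streams_update_full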

    Further Improvements

    Besides the work on the MongoDB connector, many improvements and feature additions have been made to the other connectors. Amongst other things,

    • The names of transaction metadata topics are configurable

    • The Debezium UI has been further built out (see here and here for demos of this)

    • The Debezium connector for Postgres now supports logical decoding messages, as emitted using the pg_logical_emit_message() function

    • There’s a new snapshot mode SCHEMA_ONLY_RECOVERY for the Debezium connector for Oracle

• The Debezium connector for Oracle supports TRUNCATE events and the binary.handling.mode option for controlling how BLOB data is exported (see the configuration sketch after this list)

    • There’s support for remote Infinispan caches for buffering large Oracle transactions

• The Debezium connector for MySQL can now export table comments; it also supports heartbeat action queries and schema changes while an incremental snapshot is running; in addition, it received many improvements to its DDL parser and character set handling

    • The Debezium connector for Vitess supports transaction metadata events, has an improved source struct, and supports re-sharding operations in a more flexible way
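
To make the Oracle-related items above a bit more concrete, here is a small key/value sketch; the option names are the ones mentioned in this post, while the values are merely examples (base64 is one of the binary handling modes already known from the other connectors):

# Illustrative Debezium connector for Oracle settings (example values; other required settings omitted)
snapshot.mode=schema_only_recovery
binary.handling.mode=base64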

    Please take a look at the original release announcements (Alpha1, Alpha2, Beta1, and CR1) as well as the 1.8 release notes in order to learn more about these and other new features of this release.

Many thanks to all the folks from the Debezium community who contributed code changes to this release:

    What’s Next?

With another release shipped on schedule, it’s time to take a break and rest over the upcoming holidays. We’ll be back to business in early January, with the planning for the 1.9 release being the first activity.

Please let us know about any requirements and feature requests you may have. One area we’d like to focus on for the next release is performance benchmarking and subsequently applying performance improvements based on that. It also looks like there will be a new community-led Debezium connector for a distributed NoSQL store; stay tuned for the details around this super-exciting development!

    Later in the year, you also can expect the release of Debezium 2.0, where we’ll focus on cleaning up some inconsistencies and removing some deprecated features such as wal2json support in the Debezium connector for Postgres.

For now, we wish everybody a happy holiday season, and, if you’re into it, Merry Christmas! Please note that the core team will mostly be on PTO for the coming weeks, so replies to emails, chat messages, issue reports, and pull requests will be slower than usual. Upwards and onwards!

    Gunnar Morling

Gunnar is a software engineer at Decodable and an open-source enthusiast at heart. He has been the project lead of Debezium for many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.9.0.Alpha1 Released

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    Improved Metrics

    Debezium’s connectors provide a wide range of metrics. We have expanded upon the TotalNumberOfEventsSeen metric to provide a breakdown of those events by type. To support this endeavor, the following new metrics have been added:

    • TotalNumberOfCreateEventsSeen

    • TotalNumberOfUpdateEventsSeen

    • TotalNumberOfDeleteEventsSeen

These metrics represent the number of insert, update, and delete events respectively that have occurred since the start of the connector’s streaming phase. So not only can you continue to get the aggregate total number of events, but you can now also get a breakdown of that total by event type.
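
For example, when a connector exposes its metrics via JMX, these counters appear as additional attributes on the existing streaming metrics MBean. The object name below follows Debezium’s usual connector-metrics naming pattern; the connector type (mysql) and server name (dbserver1) are example values:

# Streaming metrics MBean (connector type and server name are examples)
debezium.mysql:type=connector-metrics,context=streaming,server=dbserver1
# new attributes: TotalNumberOfCreateEventsSeen, TotalNumberOfUpdateEventsSeen, TotalNumberOfDeleteEventsSeen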

    Oracle ROWID data type support

    Oracle users may elect to use a ROWID data type column as an optimization to represent a relationship between the current row and the row identified by the ROWID column value. Starting with this release, columns using the ROWID data type can be captured by Debezium and emitted in change events.

Oracle has two flavors of row identifier column data types, ROWID and UROWID. While these may be used interchangeably in some contexts, they’re very different in the context of change data capture events. Although we’ve added support for ROWID, UROWID remains unsupported at this time.

    Other Fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • JSON Payload not expanding when enabling it (DBZ-4457)

• R/O incremental snapshot can block the binlog stream on restart (DBZ-4502)

    • Infinispan doesn’t work with underscores inside cache names (DBZ-4526)

    • Can’t process column definition with length exceeding Integer.MAX_VALUE (DBZ-4583)

    • Oracle connector can’t find the SCN (DBZ-4597)

    • Update Postgres JDBC driver to 42.3.1 (DBZ-4374)

    • Upgrade SQL Server driver to 9.4 (DBZ-4463)

    Altogether, 100 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release:

    What’s Next?

    We have started an open discussion regarding Debezium 2.0 on the mailing list. Your feedback is invaluable so let us know what you’d like to see added, changed, or improved!

    In the meantime, we’re just getting started! There will be another 1.9 pre-release in the coming weeks, sticking with our 3-week cadence. You can also expect a bugfix release sometime this quarter for 1.8 as we continue to get community feedback.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.9.0.Alpha2 Released

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    Support for Oracle 21c

The Debezium Oracle connector has been tested with the latest release of Oracle 21c, 21.3.0.0, and is compatible. If you use either the LogMiner or the XStream adapter, you should now be able to use Oracle’s latest flagship version and stream change events without any changes. If you are on Oracle 12 or Oracle 19 and perform a database upgrade, your connector configuration should require no changes and remain compatible.

    Configuring kafka.query.timeout.ms

    When using the Kafka Admin Client and issuing API calls, the default timeout is 3 seconds. The new kafka.query.timeout.ms field can be used to provide a custom timeout to the Kafka Admin Client to avoid possible timeout problems in environments that may use TLS or SSL encryption or where network latency causes an unexpected timeout.
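
For instance, the option can be set alongside the other connector properties; the 10 second value below is just an example (the default remains 3 seconds):

# Example: raise the Kafka admin client timeout from the 3 second default (value is illustrative)
kafka.query.timeout.ms=10000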

Thanks to the great work done by community member Snigdhajyoti Ghosh.

    Improvements in Redis for Debezium Server

There are three new options in the Redis support for Debezium Server:

    • redis.retry.initial.delay.ms

    • redis.retry.max.delay.ms

    • batch.size

Redis allows specifying a maximum memory limit using the maxmemory configuration; however, if this field is not configured, then Redis will continue to allocate memory. If all memory is consumed, an OutOfMemory exception occurs. The Redis sink now uses redis.retry.initial.delay.ms and redis.retry.max.delay.ms to set an initial and maximum retry delay, making it more resilient to this and to connection-related issues. If you are experiencing such exceptions, we urge you to try these new settings to improve the sink’s resilience.

Pipeline-based transactions can substantially increase Redis throughput. To leverage them, the batch.size configuration option can be specified, which allows the sink to write batches of change records rather than writing each record one by one.
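
Put together, a Debezium Server configuration using these new settings might look like the following sketch; all values are examples, and the fully-qualified property names assume the usual debezium.sink.redis. prefix used by the Redis sink:

# Illustrative Debezium Server Redis sink settings (example values; names assume the debezium.sink.redis. prefix)
debezium.sink.type=redis
debezium.sink.redis.address=localhost:6379
debezium.sink.redis.retry.initial.delay.ms=300
debezium.sink.redis.retry.max.delay.ms=10000
debezium.sink.redis.batch.size=500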

Thanks to Yossi Shirizli for these amazing improvements.

    Other fixes

    Some notable bug fixes and upgrades are:

    • Suspected inconsistent documentation for 'Ad-hoc read-only Incremental snapshot' DBZ-4171

    • Oracle Logminer: snapshot→stream switch misses DB changes in ongoing transactions DBZ-4367

    • DDL parsing issue: ALTER TABLE …​ MODIFY PARTITION …​ DBZ-4649

    • OracleSchemaMigrationIT fails with Xstream adapter DBZ-4703

    • Migrating UI from webpack-dev-server v3 to v4 DBZ-4642

    • Upgrade postgres driver to version 42.3.2 DBZ-4658

    • Upgrade to Quarkus 2.7.0.Final DBZ-4677

    • Update shared UG deployment file for use with downstream OCP Install Guide DBZ-4700

    • Indicate ROWID is not supported by XStream DBZ-4702

    • Incremental snapshots does not honor column case sensitivity DBZ-4584

    • Build trigger issues DBZ-4672

    • Cannot expand JSON payload with nested arrays of objects DBZ-4704

    We will also be backporting the critical bugfixes to the 1.8 branch and will release Debezium 1.8.1.Final later this week.

    Anisha Mohanty

    Anisha is a Software Engineer at Red Hat. Currently working with the Debezium Team. She lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.9.0.Beta1 Released

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    Debezium Server Knative Eventing

    Debezium Server has grown quite a lot since its introduction to the Debezium portfolio in version 1.2. In this release, we have added a new sink implementation to support Knative Eventing.

    Knative Eventing "provides tools and infrastructure to route events from a producer to consumers", in a very similar way in which Apache Kafka allows the exchange of events via message topics. With Debezium Server, you can now leverage the new debezium-server-http sink to deliver Debezium change data events to a Knative Broker, a Kubernetes resource that defines a mesh for collecting and distributing CloudEvents to consumers. In other words, Debezium Server can act as a "native" Knative event source.

    In order to get started with Debezium and Knative Eventing, you simply need to configure the Debezium Server with your desired source connector and then configure the sink side with the following:

    debezium.sink.type=http
debezium.format.value=cloudevents

The sink will attempt to automatically detect the endpoint based on the K_SINK environment variable. If no value is defined by this variable, you can explicitly provide the endpoint URL directly using:

    debezium.sink.http.url=https://<hostname>/<end-point>
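
For completeness, a minimal Debezium Server application.properties sketch combining a source connector with this sink could look roughly as follows; the Postgres source and all of its values are assumptions chosen purely for illustration:

# Sketch of a Debezium Server configuration using the HTTP (Knative) sink -- all values are examples
debezium.sink.type=http
debezium.format.value=cloudevents
debezium.source.connector.class=io.debezium.connector.postgresql.PostgresConnector
debezium.source.offset.storage.file.filename=data/offsets.dat
debezium.source.database.hostname=postgres
debezium.source.database.port=5432
debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=inventory
debezium.source.database.server.name=tutorial
debezium.source.table.include.list=public.customers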

    We’re super excited about this new sink connector and we look forward to all your feedback. A big thank you to Chris Baumbauer for this excellent contribution!

    Redis-managed Offsets for Debezium Server

Several folks from Redis have stepped up lately to improve the story around integrating Debezium and Redis Streams. After the performance improvements done in 1.9.0.Alpha1 (by means of batching), another result of that work is the ability to store connector offsets in Redis. For the next 1.9 early access release you can expect a database history implementation backed by Redis, and the team is also working on implementing retry support for Debezium Server. Thanks a lot to Yossi Shirizli, Oren Elias and all the other Redis folks contributing not only to the Redis Streams sink, but also to Debezium and Debezium Server at large!

    Multi-partitioned Scaling for SQL Server Connector

Some database platforms, such as SQL Server and Oracle, support the creation and management of multiple logical databases within a single physical database server instance. Traditionally, streaming changes from multiple logical databases required a separate connector deployment for each one. There isn’t anything innately wrong with such a deployment strategy, but it can quickly start to show its shortcomings if you have many logical databases; for instance, in multi-tenancy scenarios with one logical database per tenant, the overhead of setting up and operating one connector per database can become a bottleneck. Besides that, processing change events from multiple logical databases lends itself perfectly well to parallelization by means of Kafka Connect’s concept of tasks.

    Over the last several 1.x releases, a tremendous amount of work has gone into key fundamental changes to Debezium’s common connector framework, setting the stage for a new horizontal scaling strategy.

One of the initial goals of this new strategy is to eliminate the need for multiple connector deployments when streaming changes from multiple logical databases within a single SQL Server instance. Additionally, it was critical to expose metrics in a way that enables monitoring tools to report on the state and health of the connector both from a connector-centric perspective and for each logical database being processed. In this release, we’ve achieved those goals.
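
As a purely hypothetical sketch of what this looks like from a user’s point of view, a single SQL Server connector could be pointed at several logical databases at once; the database.names option and all values shown here are assumptions for illustration only, so please check the connector documentation for the exact option names in your version:

# Hypothetical multi-database SQL Server connector settings -- option names and values are assumptions
connector.class=io.debezium.connector.sqlserver.SqlServerConnector
database.hostname=sqlserver
database.port=1433
database.names=tenant_a,tenant_b,tenant_c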

    But this is just the beginning folks!

This foundation prepares the groundwork for moving toward new horizontal scaling strategies. Debezium currently uses a single-task-based architecture, and this work opens up the possibility to really harness the power of a multi-node Kafka Connect cluster and distribute chunks of work across multiple tasks. Furthermore, this can be extended to other connectors such as Oracle.

This work has been led by the team around Sergei Morozov of SugarCRM, who already deploy the SQL Server connector in multi-partition mode built from an internal fork, which they maintain internally until the entire body of work has been upstreamed. We’d like to say a huge, huge thank you to Sergei, Jacob Gminder, Mike Kamornikov, and everyone else from SugarCRM who worked tirelessly to make this possible for the Debezium community, and we’re looking forward very much to continuing and further expanding this close collaboration.

    Other Fixes and Changes

    Further fixes and improvements in the 1.9.0.Beta1 release include:

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

With the Beta1 release done, we are approaching the final phase of the 1.9 release cycle. Depending on incoming issue reports, you can expect a new release in the next few weeks, most likely CR1.

    As we turn and look ahead beyond 1.9, you can expect work on Debezium 2.0 to begin in early April 2022. The current roadmap is to devote 2 full release cycles, which means you can expect Debezium 2.0 sometime near the end of September 2022. In the meantime, you can expect regular updates to Debezium 1.9 throughout this process.

If you are interested in Debezium 2.0, we have collected a number of items in DBZ-3899 thus far. This is not an exhaustive list, nor has it been prioritized and scoped to what you can expect in the totality of 2.0; however, it is what we’ve identified as things that either the community or the team feel are actionable tasks for this new major release. If there is something you would like to see, please take a moment and either raise a discussion on the above Jira ticket or join the discussion on this topic on our mailing list.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Hello Debezium Team!

    Hi everyone, my name is Vojtěch Juránek and I recently joined the Debezium team.

Most of my professional IT career I’ve spent at Red Hat. I have a background in particle physics, but I did quite a lot of programming even before joining Red Hat, working on simulations of high-energy particle collisions and their data analysis. Science is open by default, and all the software I was using was open source as well. This is where my love for open source started.

When I decided to do programming for a living, Red Hat was a natural choice for me, as at that time it was one of the few companies which promoted open source heavily. I started to work at Red Hat as a Hudson developer. I developed and maintained many plugins and also contributed to Hudson core. I focused mainly on Hudson’s stability and memory footprint, as I also took care of the internal JBoss Hudson instance, which was the world’s largest Hudson deployment at that time. When Hudson was forked into Jenkins, I co-created and maintained the Jenkins LTS (long-term support) branch. I was also a member of the Jenkins CERT team.

After a couple of years spent with Hudson/Jenkins, I decided it was time to move on and joined the Infinispan team as a quality engineer. Knowing only a little about things like in-memory data grids when I joined the team, I quickly discovered the beautiful world of distributed systems and fell in love with it. As a quality engineer on the Infinispan project I not only dug deep into distributed databases and consensus algorithms, but also became familiar with other very interesting projects such as Jepsen.

Later on, I accepted the challenge of discovering another interesting world - the world of virtual machines and data centers - and started to work as a developer on the oVirt project in the storage team. I was mostly working on low-level stuff, on the vdsm and imageio projects.

Working on oVirt was interesting, but I was really excited when I got an opportunity to move back to databases and distributed systems and join the Debezium project. I’m looking forward to working on this wonderful project!

    Onwards,

    --Vojta

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 1.9.0.CR1 Released

<dependency>
    <groupId>io.debezium</groupId>
    <artifactId>debezium-connector-cassandra-4</artifactId>
    <version>1.9.0.CR1</version>
</dependency>

We introduced a new artifact rather than a user-configurable toggle, as this allows the two code bases to diverge as needed. Both the Cassandra 3 and Cassandra 4 connectors can thus be refined independently as we move forward with building the Cassandra 4 connector with Java 11 as a baseline.

The Debezium connector for Cassandra 4 is based on Apache Cassandra 4.0.2. If you intend to upgrade to Cassandra 4, the migration should be relatively seamless from Debezium’s perspective. Once the Cassandra environment has been upgraded, adjust the driver configuration as outlined in the Cassandra 3 breaking changes section above and restart the connector.

    We would like to thank Štefan Miklošovič and Ahmed Eljami for this contribution!

    Other Fixes and Changes

    Further fixes and improvements in the 1.9.0.CR1 release include:

    Please refer to the release notes to learn more about these and further fixes in this release.

    As always, a big thank you to everyone contributing to this release:

    Outlook

    With CR1 done, you can expect 1.9 Final either later this week or early next week depending on issue reports.

    As we begin to look ahead, you can expect work on Debezium 2.0 to begin in the near future. The current roadmap is to devote the next two release cycles on Debezium 2.0, releasing it sometime near the end of September 2022. In the meantime, expect regular updates to continue for Debezium 1.9 throughout this process.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/04/06/debezium-1.9-final-released/index.html b/blog/2022/04/06/debezium-1.9-final-released/index.html

    Debezium 1.9.0.Final Released

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    Support for Apache Cassandra 4

    Added just in time for the candidate release of Debezium 1.9, support for Cassandra 4 comes in the form of a new connector: you should now download either the debezium-connector-cassandra-3 or the debezium-connector-cassandra-4 connector archive, depending on your database version. While we usually strive for multi-version support within individual connectors, the code changes required to support the new version were so substantial that we decided to maintain two separate code bases for the two connector versions (with commonalities extracted into a shared module).

    Both connectors, for Cassandra 3 and 4, remain in incubating state for the time being, and you can expect further improvements to them going forward. A massive thank you to Štefan Miklošovič and Ahmed Eljami for this huge piece of work, which also paves the way towards moving to Java 11 as the baseline for Debezium in the near future.

    SQL Server Multi-Database Support

    SQL Server allows for setting up multiple logical databases on one physical host, which for instance comes in handy for separating the data of different tenants of a multi-tenant capable application. Historically, this required setting up one instance of the Debezium connector for SQL Server per logical database, which could become a bit cumbersome when dealing with tens or even hundreds of databases, as is often the case for multi-tenancy use cases.

    Over the last year, Sergei Morozov and his team at SugarCRM reworked the Debezium SQL Server connector and the Debezium connector framework to be multi-partition aware in order to address situations like this: the framework is now capable of streaming changes from multiple source partitions, which are split up between connector tasks (in Kafka Connect terminology), which in turn can be distributed amongst the worker nodes of a Kafka Connect cluster.

    In the case of the SQL Server connector, a logical database equates to one such source partition, so that you can now stream, for instance, 20 databases from one physical SQL Server host, spread across four source tasks running on five Kafka Connect worker nodes. To use the new multi-partition mode, configure the names of the databases to capture via the new database.names connector configuration property (rather than using the previously existing database.dbname), and optionally set the value of tasks.max to a value larger than 1. Note that the schema and topic names as well as the structure of connector metrics differ between single and multi-partition mode, so as to account for the name of the logical database and the id of the source task, respectively.
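    As a rough sketch and not a complete configuration (the host, credentials, and database names below are placeholders), a multi-partition capable connector registration could look like this:

    connector.class=io.debezium.connector.sqlserver.SqlServerConnector
    # placeholder connection details
    database.hostname=sqlserver.example.com
    database.port=1433
    database.user=debezium
    database.password=dbz
    # capture three logical databases from the same physical host (multi-partition mode)
    database.names=tenant_a,tenant_b,tenant_c
    # let the resulting source partitions be spread across several connector tasks
    tasks.max=3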

    Multi-partition mode is experimental as of the 1.9 release and is planned to fully replace the legacy single-partition mode for the SQL Server connector in a future release, i.e. even if you capture changes from only a single logical database, you will then be using multi-partition mode. Multi-partition mode will also be rolled out for other connectors where it’s possible, e.g. for the connectors for Oracle and IBM Db2.

    Thanks a lot to Sergei and team for their excellent collaboration around that feature!

    Further Changes

    Let’s take a look at some more features new in Debezium 1.9. First, Debezium Server now includes a sink adaptor for HTTP, which means it can be used as a "native" event source for Knative Serving, without the need for sending messages through a message broker like Apache Kafka first.
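    To give an idea of what this looks like, a minimal Debezium Server application.properties using the HTTP sink might resemble the following sketch; the endpoint URL is a placeholder, and on Knative it is typically injected via the K_SINK environment variable:

    # send change events to an HTTP endpoint instead of a message broker
    debezium.sink.type=http
    # placeholder endpoint; on Knative this is usually provided through the K_SINK env variable
    debezium.sink.http.url=http://my-sink.example.com/events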

    Then, the friendly folks over at Redis stepped up and contributed several improvements to how Debezium (Server) integrates with Redis Streams: besides several performance improvements, the database history for connectors like the MySQL one can now be stored in Redis, and offsets can be stored there as well. But they didn’t stop there: for instance, Debezium Server now supports custom configuration providers, as already available in Kafka Connect.
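    A sketch of what such a setup could look like is shown below; the Redis address is a placeholder, and the offset and history store class names are assumptions based on the integration described above, so please verify them against the Debezium Server documentation:

    # stream change events to Redis Streams
    debezium.sink.type=redis
    debezium.sink.redis.address=redis.example.com:6379
    # assumed class names for keeping connector offsets and database history in Redis
    debezium.source.offset.storage=io.debezium.server.redis.RedisOffsetBackingStore
    debezium.source.database.history=io.debezium.server.redis.RedisDatabaseHistory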

    Going forward, the Redis team is planning to work on further cool improvements to Debezium at large, such as better retrying logic in case of failures. Looking forward to those!

    To learn more about all the features, improvements and bug fixes shipped in Debezium 1.9, please check out the original release announcements (Alpha1, Alpha2, Beta1, and CR1) as well as the 1.9 release notes!

    Many thanks to all the folks from the Debezium community who contributed code changes to this release:

    Coming Up

    So what’s next after 1.9? You may think 1.10, but that’s not what we’ll do; instead, we’re planning to release Debezium 2.0 as a new major version later this year!

    While we don’t strictly adhere to semantic versioning (i.e. a new minor release like 1.9 may require some small degree of consideration), one of our key objectives with Debezium releases is to limit breaking changes for existing users as much as possible. That’s why, for instance, configuration options that became superfluous are not simply removed but deprecated first. The same applies to changes to the change event format, which are rolled out gradually. Over time, this has led to a number of legacy options and other aspects which we finally want to iron out. Debezium 2.0 will be the release where we get rid of this kind of legacy cruft. For instance, we are planning to

    • Remove the legacy implementations of the connectors for MySQL and MongoDB (superseded by more capable and mature implementations based on Debezium’s standard connector framework, which have been enabled by default for quite some time)

    • Drop wal2json support for Postgres (superseded by pgoutput)

    • Use Java 11 as a baseline (for instance allowing us to emit JDK Flight Recorder events for better diagnostics)

    • Default to multi-partition mode metrics (improved consistency)

    • Make default topic names more consistent, for instance for the heartbeat topic

    • Change the default type mappings for a small number of column types

    Planning for this is in full swing right now, and you are very much invited to join the discussion either on the mailing list or on the DBZ-3899 issue in Jira. Note that while we want to take the opportunity to clean up some oddities which have accumulated over time, backwards compatibility will be a key concern as always, and we’ll try to minimize the impact on existing users. But as you would expect from a new major release, upgrading may take a slightly larger effort in comparison to the usual minor releases.

    In terms of a timeline, due to the size and number of planned changes, we’re going to deviate from the usual quarterly release cadence and instead reserve two quarters for working on Debezium 2.0, i.e. you can look forward to that release at the end of September. In the meantime, there will be bugfix releases of the 1.9 version, as needed per incoming bug reports.

    Upwards and onwards!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/04/07/read-only-incremental-snapshots/index.html b/blog/2022/04/07/read-only-incremental-snapshots/index.html

        --from-beginning \
        --property print.key=true \
        --topic dbserver1.inventory.orders

    If you were to modify any record in the orders table while the snapshot is running, this would be either emitted as a read event or as an update event, depending on the exact timing and sequence of things.

    As the last step, let’s terminate the deployed systems and close all terminals:

    # Shut down the cluster
    docker-compose -f docker-compose-mysql.yaml down

    Conclusion

    Debezium is an excellent change data capture tool under active development, and it’s a pleasure to be a part of its community. We’re excited to use incremental snapshots in production here at Shopify. If you have similar database usage restrictions, check out the read-only incremental snapshots feature. Many thanks to my team and the Debezium team without whom this project wouldn’t happen.

    Kate Galieva

    Kate is a Senior Production Engineer working on Shopify’s Streaming Platform. She lives in Toronto, Canada.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/04/21/debezium-1.9.1-final-released/index.html b/blog/2022/04/21/debezium-1.9.1-final-released/index.html

    Debezium 1.9.1.Final Released

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    Overview

    One of the more critical changes addresses a problem with the Oracle connector when stopping and restarting the connector. More specifically, the last committed transaction’s events would be re-emitted upon restart and should not have been (DBZ-4936).

    A second critical problem was that incremental snapshots were not working correctly for MongoDB. When an incremental snapshot signal was sent, a JSON parsing error was raised and should not have been (DBZ-5015).

    And finally, there were numerous SQL parsing errors for both MySQL and Oracle that were also addressed (DBZ-4976, DBZ-4979, DBZ-4980, DBZ-4994, DBZ-4996).

    We strongly recommend upgrading to 1.9.1.Final to avoid these issues as well as the other bugfixes that were included as a part of this release.

    Overall, 29 issues were fixed in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Many thanks to the following individuals from the community who contributed to Debezium 1.9.1.Final: Lars Werkman, Andrey Pustovetov, Anisha Mohanty, Brad Morgan, Chris Cranford, Jiri Pechanec, Mohammad Yousuf Minhaj Zia, Paul Tzen, Yossi Shirizli, and chadthamn!

    Outlook

    The Debezium 1.9 release stream will remain the current long-running version for the next five months. During this time, we will continue to evaluate user reports and do micro-releases to address bugs and regressions.

    Also in the coming weeks, expect to hear updates about Debezium’s roadmap as well as a clear plan for Debezium 2.0, its preview releases, and what lies ahead for the future. We have a lot in store to share, so be sure to stay tuned!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/04/28/debezium-2.0-alpha1-released/index.html b/blog/2022/04/28/debezium-2.0-alpha1-released/index.html

    Debezium 2.0.0.Alpha1 Released

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    Java 11 required

    We have wanted to make the jump to Java 11 as a build requirement for quite some time now, and with Debezium 2.0 this is now possible. Java 11 enables us to take advantage of new language features, such as the new String API and Predicate support changes in the codebase, while also benefiting from many Java performance improvements.
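    As a small illustration of the kind of convenience this brings (an example written for this post, not code taken from the Debezium code base), Java 11 additions such as Predicate.not() and the new String methods make filtering noticeably terser:

    import java.util.List;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    public class Java11Example {
        public static void main(String[] args) {
            List<String> names = List.of("orders", " ", "customers", "");
            // String::isBlank and Predicate.not() were both added in Java 11
            List<String> tables = names.stream()
                    .filter(Predicate.not(String::isBlank))
                    .map(String::strip) // strip() is the Unicode-aware trim, also new in Java 11
                    .collect(Collectors.toList());
            System.out.println(tables); // prints [orders, customers]
        }
    }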

    Our very own Vojtech Juranek will be publishing a blog post next week that discusses the switch to Java 11 and 17 in greater detail. I highly recommend giving it a read as it provides a deep dive into the technical background & effort that went into making this possible.

    So before migrating to Debezium 2.0, be sure that Java 11 is available.

    PostgreSQL wal2json support removed

    The PostgreSQL connector has supported several plugins throughout Debezium 1.x, including decoderbufs, wal2json, and pgoutput. PostgreSQL 9.6 recently reached end of life on November 11, 2021. This presented a great opportunity for us to review the supported decoders and to see whether we could streamline those options.

    Since pgoutput is a native decoder supported by all non-EOL versions of PostgreSQL (PG10+), it made sense to remove wal2json. Reducing the number of decoders from three to two allows us to streamline the code for PostgreSQL, reduces the overall maintenance cost of the connector, and gives us a much narrower target for overall support.

    If you are still using PostgreSQL 9.6 or were using wal2json previously, you will need to migrate to at least PostgreSQL 10.0 and to either decoderbufs or pgoutput, respectively, before upgrading to Debezium 2.0.
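    For reference, switching an existing PostgreSQL connector from wal2json to the native decoder comes down to changing the plugin.name connector option, as in this minimal sketch (all other connector options omitted):

    # use the logical decoding plug-in built into PostgreSQL 10+
    plugin.name=pgoutput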

    Legacy MySQL implementation removed

    As some of you may or may not know, we implemented the MySQL connector based on the common-connector framework back in Debezium 1.5 (Feb 2021). As a part of that re-write, we introduced the ability for MySQL users to enable the legacy connector behavior by setting the configuration option internal.implementation to legacy. This legacy implementation was deprecated in favor of the new common-connector framework behavior. With Debezium 2.0, this internal.implementation configuration option and the legacy connector implementation have been removed.

    If your current connector deployment relies on this legacy implementation, you should be aware that by upgrading to Debezium 2.0, the connector will no longer use that older implementation and will use the common-connector implementation only. Feature-wise, both implementations are on par with one another, with one exception: the legacy implementation had experimental support for changing filter configurations. If you have relied on this legacy behavior, be aware that this feature is no longer available.

    Other fixes & improvements

    There are several bugfixes and stability changes in this release, some noteworthy are:

    • Implement Pub/Sub Lite change consumer DBZ-4450

    • Add Google Pub/Sub emulator support DBZ-4491

    • Making Postgres PSQLException: This connection has been closed. retriable DBZ-4948

    • Should store event header timestamp in HistoryRecord DBZ-4998

    • Getting java.sql.SQLException: ORA-01291: missing logfile while running with archive log only DBZ-4879

    • Debezium uses wrong LCR format for Oracle 12.1 DBZ-4932

    • NPE caused by io.debezium.connector.oracle.antlr.listener.ColumnDefinitionParserListener.resolveColumnDataType DBZ-4976

    • Outbox Transform does not allow expanded payload with additional fields in the envelope DBZ-4989

    • CLOB with single quotes causes parser exception DBZ-4994

    • Cassandra 3 handler does not process partition deletions correctly DBZ-5022

    • SQL Server in multi-partition mode fails if a new database is added to an existing configuration DBZ-5033

    • Upgrade to Quarkus 2.8.2.Final DBZ-5062

    Altogether, 55 issues were fixed for this release.

    What’s Next?

    We have resolved the runtime problem with Debezium Server in the 1.9.1.Final release, so you can expect a 1.9.2.Final later this week which will also address other bugfixes. You can continue to expect updates to 1.9 in the weeks that follow as bugs are reported and fixes are made to address those.

    As we continue our efforts on Debezium 2.0, you can expect a second pre-release in the coming weeks, sticking to our regular 3-week cadence. In this next pre-release, we plan to focus on message schema versioning/naming, connector configuration changes with new pass-thru namespaces, removal of deprecated options, as well as unifying default value handling, just to name a few on the roadmap.

    And speaking of Debezium’s roadmap, stay tuned, as we’ll soon have more to share about Debezium 2.0, future 2.x releases, and our roadmap as a whole!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/05/04/switch-to-java-11/index.html b/blog/2022/05/04/switch-to-java-11/index.html

            CASSANDRA_SERVER_DIR + "/hints",
            CASSANDRA_SERVER_DIR + "/saved_caches");
        cleanup.stop();
    }

    Once we solved the issue with the Cassandra tests, we were mostly done and were ready to use Java 11 in the main Debezium code and Java 17 for our tests.

    Open Issues

    We need more battle testing to be sure that everything works well with Java 11/17. Your help with testing and bug reports would be very valuable here and more than welcome. Currently we are aware of one minor unsolved issue related to the Java update. Some IDEs cannot distinguish between maven.compiler.release and maven.compiler.testRelease (or it’s not very clear to us how to set it up). For example this test using a text block is marked as an error in the IDE:

    Test using text block in IntelliJ Idea.

    You can manually set the Java level to 17, but in this case you may unintentionally use Java > 11 features in non-test code without the IDE letting you know (which admittedly isn’t too much of a problem, as the next Maven build, e.g. on CI, would catch that issue). Moreover, IntelliJ IDEA, for example, resets the language level upon any changes in the pom.xml files. Have you solved this issue? Or do you use an IDE which doesn’t have issues with mixing different Java levels? Please share your experiences in the discussion!
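    For reference, the split between the main and test Java level discussed above is expressed through the standard maven-compiler-plugin properties, roughly like this (a sketch, not the exact Debezium build configuration):

    <properties>
        <!-- main code is compiled with the Java 11 language level -->
        <maven.compiler.release>11</maven.compiler.release>
        <!-- test code may use Java 17 features such as text blocks -->
        <maven.compiler.testRelease>17</maven.compiler.testRelease>
    </properties>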

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2022/06/02/debezium-1-9-3-final-released/index.html b/blog/2022/06/02/debezium-1-9-3-final-released/index.html

    transforms.outbox.type=io.debezium.connector.mongodb.transforms.outbox.MongoEventRouter
    value.converter=io.debezium.converters.ByteArrayConverter

    Now the data will be emitted to the broker as-is, as a byte-array that can be safely consumed by consumers. Thanks to Nathan Bradshaw for this excellent contribution!

    Heartbeat action queries with Oracle

    The heartbeat action query is a feature that allows a Debezium connector to write records to the source database and to capture those records during the event processing loop. This was first introduced for PostgreSQL to deal with situations when captured tables change less frequently than other non-captured tables, causing unintended WAL growth.

    For Oracle using the LogMiner implementation, a similar problem occurs that impacts the connector’s ability to restart: the offset SCN is not advanced at a regular interval when changes are only being made to other tables that are not captured, whether they are part of the same pluggable database or another.

    To enable heartbeat action queries, the connector must be configured with:

    heartbeat.interval.ms=1000
    heartbeat.action.query=INSERT INTO heartbeat (id) values (SYSDATE)
    table.include.list=MYSCHEMA.HEARTBEAT,...

    The heartbeat functionality must first be enabled by specifying heartbeat.interval.ms. This controls how often the connector generates heartbeat events. If this value is not greater than 0, heartbeats are disabled.

    Next, to specifically use the action query feature, the heartbeat.action.query option must be given. This specifies a SQL statement that will be executed on each heartbeat interval. This statement can be either an INSERT or an UPDATE, as long as the resulting SQL operation generates a row change.

    Finally, the action query must operate on a table that is included in the connector’s filter configuration. Like any other captured table, the table must also be configured with the correct supplemental logging so that the event is captured.

    With this configuration in place, and assuming no long-running transaction, the offset SCN will advance on each heartbeat.

    Oracle LogMiner session duration is now controllable

    The Debezium Oracle connector’s LogMiner session duration has always been based on how often the redo log switches. Generally, this behavior has worked well for most environments; however, in low-traffic environments or during off-peak hours, it has the potential to re-use the same LogMiner session for a longer period of time, which can lead to ORA-04030 exceptions when PGA memory is exhausted.

    A new configuration option has been added, log.mining.session.max.ms, allowing full control over the maximum duration of an Oracle LogMiner session. When set to a value greater than 0, the connector will automatically close and restart the mining session if the maximum duration is reached or a log switch occurs, whichever comes first. Oracle environments with low volume, particularly during off-peak hours, should no longer notice any PGA memory concerns when enabling this new option. Coordinate with your database administrator team to determine the best value based on your environment’s configuration and activity.
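    As a sketch, capping the mining session at, say, ten minutes would look like this in the connector configuration (the value is an example, not a recommendation):

    # restart the LogMiner session after at most 10 minutes (value in milliseconds)
    log.mining.session.max.ms=600000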

    Further Changes

    But that’s not all, there were also a number of bugfixes that are noteworthy, including but not limited to:

    • MySQL connector increment snapshot failed parse datetime column lenth when connector set "snapshot.fetch.size": 20000 DBZ-4939

    • InstanceAlreadyExistsException during MongoDb connector metrics registration DBZ-5011

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • 4 Connections per connector (postgres) DBZ-5074

    • Oracle Logminer: records missed during switch from snapshot to streaming mode DBZ-5085

    • Cannot Set debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm to empty value DBZ-5105

    • MilliSecondsBehindSource is not reported by SQL Server connector DBZ-5137

    • ExtractNewRecordState SMT Replaces Null Value with Column’s Default Value DBZ-5166

    • Oracle connector metrics tracking of rollback and abandoned transactions may cause high memory usage DBZ-5179

    We strongly recommend upgrading to 1.9.3.Final to get the latest improvements both in performance and stability.

    Overall, 47 issues were fixed in this release. Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Outlook

    The Debezium 1.9 release stream will remain the current long-running version for the next three months. During this time, we will continue to evaluate user reports and do micro-releases to address bugs and regressions depending on severity.

Also, quite a lot of work has gone into Debezium 2.0. We intend to release Debezium 2.0.0.Alpha2 next week, with releases to follow about every three weeks thereafter.

    Stay tuned for more in the coming weeks and stay cool out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

Debezium 2.0.0.Alpha2 Released
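The diff hunk picks up just after the post’s opening stop-snapshot example. As a minimal sketch of what such a signal payload looks like (reconstructed here, not quoted from the post):

{
   "type": "incremental"
}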

This example does not specify the data-collections property, as it is optional for the stop-snapshot signal. When this property isn’t specified, the signal implies that the current in-progress incremental snapshot should be stopped entirely. This makes it possible to stop an incremental snapshot without knowledge of the current or outstanding tables or collections yet to be captured.

    Signals support regular expressions

    Incremental snapshot signals have required the use of explicit table/collection names in the data-collections payload attribute. While this worked well, there may be situations where broad capture configurations could take advantage of regular expression usage. We already support regular expressions in connector configuration options, such as include/exclude lists, so it made sense to extend that to incremental snapshots as well.

Starting in Debezium 2.0, all incremental snapshot signals can use regular expressions in the data-collections payload property. Using one of the post’s earlier stop signal examples, which listed schema1.table1 and schema2.table2 explicitly, the payload can be rewritten using regular expressions:

    {
       "data-collections": ["schema[1|2].table[1|2]"],
       "type": "incremental"
}

    Just like the explicit usage, this signal with regular expressions would also stop both schema1.table1 and schema2.table2.

    Removal of MongoDB oplog support

In Debezium 1.8, we introduced the new MongoDB change stream feature while also deprecating the oplog implementation. The transition to change streams offers a variety of benefits, such as being able to stream changes from non-primary nodes, the ability to emit update events with a full document representation for downstream consumers, and so much more. In short, change streams are simply a far superior way to perform change data capture with MongoDB.

    The removal of the oplog implementation also means that MongoDB 3.x is no longer supported. If you are using MongoDB 3.x, you will need to upgrade to at least MongoDB 4.0 or later with Debezium 2.0.

    Configuration option clean-up

Debezium 1.x has seen a lot of evolution over the years. Connector-specific options were added to handle migrations or specific features, and many of these have since been deprecated or even replaced by common options that are universal for all connectors. One of the major tasks for Debezium 2.0 is to do some internal housekeeping on configuration options, as many have been deprecated.

With that, there is also more configuration housekeeping coming in the future when we look at option namespaces. Suffice it to say, it will be important as part of the upgrade path to review the connector documentation and compare its relevant options against your current connector configuration. You just might find that you can streamline your configuration with fewer options, or that some option names have changed entirely.

    Other fixes & improvements

There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • Postgres existing publication is not updated with the new table DBZ-3921

    • MySQL connector increment snapshot failed parse datetime column length when connector set "snapshot.fetch.size": 20000 DBZ-4939

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • PostgreSQL ENUM default values are missing from generated schema DBZ-5038

    • All connectors now use multi-partitioned codebase DBZ-5042

    • Oracle LogMiner: records missed during switch from snapshot to streaming mode DBZ-5085

    • Introduce a new field "ts_ms" to identify the process time for schema change event DBZ-5098

    • Parsing zero-day fails DBZ-5099

    Altogether, an amazing 110 issues were fixed for this release.

    What’s Next?

    So while this release is a bit behind schedule, Debezium 2.0 is shaping up quite well.

The next major milestones include unifying snapshot modes across connectors, a new Snapshotter API for all connectors, compactable JSON database history, offset unification, an offset storage API, and much more. So the coming weeks have a lot in store as we continue to work on Debezium 2.0. And as usual, you can expect some (hopefully all) of these in approximately three weeks, sticking to our usual release cadence.

    Until then, let the data capturing continue!


Debezium 1.9.4.Final Released

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users on earlier versions. This release contains 32 resolved issues overall.

    Fixes

    This release focused entirely on stability and bugfixes. A few noteworthy changes include:

    • Include event scn in Oracle records DBZ-5225

    • Redis Store does not work with GCP Managed Redis DBZ-5268

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • Debezium has never found starting LSN DBZ-5031

    • Cursor fetch is used for all results during connection DBZ-5084

    • Debezium Postgres v1.9.3 fails in Materialize CI DBZ-5204

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • Deadlock during snapshot with Mongo connector DBZ-5272

    In addition, there were several SQL parser fixes for both Oracle and MySQL.

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

Many thanks to the following individuals from the community who contributed to Debezium 1.9.4.Final: Anisha Mohanty, Bob Roldan, Chris Cranford, Harvey Yue, Jiri Pechanec, Jun Zhao, Oskar Polak, Rahul Khanna, René Kerner, Tim Patterson, Vojtech Juranek!

    Outlook

    The Debezium 1.9 release stream will remain the current long-running version for the next three months. During this time, we will continue to evaluate user reports and do micro-releases to address bugs and regressions depending on severity.

    Also, quite a lot of work has gone into Debezium 2.0. We are currently actively working on Debezium 2.0.0.Alpha3 and should have an update on this in the next week.

    Stay tuned for more in the coming weeks and stay cool out there!


Debezium 2.0.0.Alpha3 Released

With this change, the scn field within the connector offsets now consists of a comma-separated list of values, where each entry represents a tuple. Each tuple has the format scn:rollback-segment-id:ssn:redo-thread.
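Only a fragment of the post’s offset example survives in this hunk, so here is a rough sketch of the new structure. The SCN values are invented and the field set is abbreviated to what the fragment shows:

{
   "scn": "2345678901:12:0:1,2345678902:3:0:2",
   "commit_scn": "2345678901",
   "lcr_position": null,
   "txId": null
}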

    While this change is forward compatible, meaning you can safely upgrade to 2.0.0.Alpha3 and the old format can be read, once the new format is written to the offsets, the older versions of the connector will be unable to read the offsets. If you upgrade and decide you need to roll back, be aware you’ll need to manually adjust the connector offset’s scn field to simply contain a string of the most recent scn value across all redo threads.
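Continuing the sketch above, rolling back would mean rewriting the field to a single value such as "scn": "2345678902", i.e. the most recent SCN across all redo threads, before restarting the older connector version.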

    Other fixes & improvements

There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • Incorrect loading of LSN from offsets DBZ-3942

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • BigDecimal has mismatching scale value for given Decimal schema DBZ-4890

    • Debezium has never found starting LSN DBZ-5031

    • Data duplication problem using postgresql source on debezium server DBZ-5070

    • Cursor fetch is used for all results during connection DBZ-5084

• Debezium connector fails at parsing select statement overrides when table name has space DBZ-5198

    • DDL statement couldn’t be parsed 2 - Oracle connector 1.9.3.Final DBZ-5230

    • Debezium server duplicates scripting jar files DBZ-5232

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle unparsable ddl create table DBZ-5237

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Character set influencers are not properly parsed on default values DBZ-5241

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • Invalid date 'SEPTEMBER 31' DBZ-5267

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5271

    • Deadlock during snapshot with Mongo connector DBZ-5272

    • Mysql parser is not able to handle variables in KILL command DBZ-5273

    • Debezium server fail when connect to Azure Event Hubs DBZ-5279

    • ORA-01086 savepoint never established raised when database history topic cannot be created or does not exist DBZ-5281

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    Altogether, a total of 66 issues were fixed for this release.

    What’s Next?

    You can expect a 1.9.5.Final release in the next week. This release will include many of the bugfixes that are part of this release, as we continue to improve the stability of 1.9 in micro-releases.

You can also expect 2.0.0.Beta1 in the next 3 weeks, keeping with our usual release cadence. The next major milestones include unifying snapshot modes across connectors, a new Snapshotter API for all connectors, compactable JSON database history, offset unification, an offset storage API, and much more.


Debezium 1.9.5.Final Released

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users on earlier versions. This release contains 24 resolved issues overall.

    Changes

    This release focused entirely on stability and bugfixes. A few noteworthy changes include:

    • Data duplication problem using postgresql source on debezium server DBZ-5070

    • Duplicate SCNs on Oracle RAC installations incorrectly processed DBZ-5245

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

    • Debezium server fail when connect to Azure Event Hubs DBZ-5279

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    • Snapshot fails when table’s relational model is created using an abstract data type as unique index DBZ-5300

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

Many thanks to the following individuals from the community who contributed to Debezium 1.9.5.Final: Anisha Mohanty, Bob Roldan, Chai Stofkoper, Chris Cranford, Mikhail Dubrovin, Harvey Yue, Henry Cai, Jiri Pechanec, Paweł Malon, Robert Roldan, Vojtech Juranek, and yangrong688!

    Outlook

    The Debezium 1.9 release stream will remain the current long-running version for the next three months. During this time, we will continue to evaluate user reports and do micro-releases to address bugs and regressions depending on severity.

    The development on Debezium 2.0 is moving along quite nicely. We have entered the second half of the development cycle, and we’ll begin beta releases with the next release toward the end of July.

    Stay tuned for more in the coming weeks, stay cool out there, and happy capturing!


Debezium 2.0.0.Beta1 Released

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features, including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes, such as the move to multi-partition mode by default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all of these in closer detail.

    Multi-partition mode now default

    Many database platforms support multi-tenancy out of the box, meaning you can have one installation of the database engine and have many unique databases. In cases like SQL Server, this traditionally required a separate connector deployment for each unique database. Over the last year, a large effort has been made to break down that barrier and to introduce a common way that any single connector deployment could connect and stream changes from multiple databases.

    The first notable change is with the SQL Server connector’s configuration option, database.dbname. This option has been replaced with a new option called database.names. As multi-partition mode is now default, this new database.names option can be specified using a comma-separated list of database names, as shown below:

    database.names=TEST1,TEST2

    In this example, the connector is being configured to capture changes from two unique databases on the same host installation. The connector will start two unique tasks in Kafka Connect and each task will be responsible for streaming changes from its respective database concurrently.

    The second notable change is to connector metrics naming. A connector exposes JMX metrics via beans that are identified with a unique name. With multi-partition mode now the default and connectors able to run multiple tasks, each task requires its own metrics bean, so a change in the naming strategy was necessary.

    In older versions of Debezium using SQL Server as an example, metrics were available using the following naming strategy:

    debezium.sql_server:type=connector-metrics,server=<sqlserver.server.name>,context=<context>

    In this release, the naming strategy now includes a new task component in the JMX MBean name:

    debezium.sql_server:type=connector-metrics,server=<sqlserver.server.name>,task=<task.id>,context=<context>

    Please review your metrics configurations as the naming changes could have an impact when collecting Debezium metrics.
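    For example, with a hypothetical connector whose server name is server1, a streaming metrics bean that was previously registered under the first name below is now registered once per task (task IDs are zero-based):

      debezium.sql_server:type=connector-metrics,server=server1,context=streaming
      debezium.sql_server:type=connector-metrics,server=server1,task=0,context=streaming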

    Debezium storage module

    In this release, we have introduced a new debezium-storage set of artifacts for file- and Kafka-based database history and offset storage. This is the first step of several planned implementations, with future support intended for platforms such as Amazon S3, Redis, and possibly JDBC.

    For users who install connectors via plugin artifacts, this should be a seamless change as all dependencies are bundled in those downloadable plugin archives. For users who embed Debezium in their applications or who build their own connector, be aware that you may need to add a new storage dependency depending on which storage implementation is used.
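    For example, an embedded application that keeps its database history and offsets in files or in Kafka would additionally declare the corresponding storage artifact on its classpath. The coordinates below are a sketch of the new module split; the version placeholder is yours to fill in, and the exact artifact names should be confirmed against the release notes:

      io.debezium:debezium-storage-file:<debezium-version>
      io.debezium:debezium-storage-kafka:<debezium-version>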

    Pluggable topic selector

    Debezium’s default topic naming strategy emits change events to topics named database.schema.table. If you require that topics be named differently, an SMT would normally be added to the connector configuration to adjust this behavior. But this presents a challenge when one of the components of the topic name, such as the database or table name, contains a dot (.), or when the SMT simply doesn’t have adequate context.

    In this release, a new TopicNamingStrategy contract was introduced to allow this behavior to be fully customized directly inside Debezium. The default naming strategy implementation should suffice in most cases, but if it doesn’t, you can provide a custom implementation of the TopicNamingStrategy contract to fully control the various names used by the connector. To provide your own custom strategy, specify the topic.naming.strategy connector option with the fully-qualified class name of the strategy, as shown below:

    topic.naming.strategy=org.myorganization.MyCustomTopicNamingStrategy

    This custom strategy is not limited to controlling the names of topics for table mappings; it also controls the names used for schema change, transaction metadata, and heartbeat topics. You can refer to the DefaultTopicNamingStrategy implementation as an example. This feature is still incubating and we’ll continue to improve and develop it as feedback is received.

    Oracle commit user in change events

    The source information block of a change event carries a variety of context about where the change event originated. In this release, the Oracle connector now includes the user who made the database change in the captured change event. A new field, user_name, can be found in the source info block with this information. This field is optional and is only available when changes are emitted using the LogMiner-based implementation. The field may also contain the value UNKNOWN if the user associated with a change is dropped before the change is captured by the connector.
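    As an abbreviated, illustrative sketch (the values are placeholders and most other source fields are omitted), the source block of an Oracle change event now looks along these lines:

      "source": {
        "connector": "oracle",
        "db": "ORCLPDB1",
        "schema": "DEBEZIUM",
        "table": "CUSTOMERS",
        "scn": "2868546",
        "user_name": "DEBEZIUM"
      }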

    Improved table unique index handling

    A table does not have to have a primary key to be captured by a Debezium connector. In cases where a primary key is not defined, Debezium inspects a table’s unique indices to see whether a reasonable key substitution can be made. In some situations, the index may refer to columns such as CTID in PostgreSQL or ROWID in Oracle. These columns are neither visible nor user-defined; they are hidden, synthetic columns generated automatically by the database. In addition, the index may use database functions, such as UPPER or LOWER, to transform the stored column value.

    In this release, indices that rely on hidden, auto-generated columns or on columns wrapped in database functions are no longer eligible as primary key alternatives. This guarantees that when an index is used as the key in place of a defined primary key, the primary key value tuple of the generated message maps directly to the same values the database uses to represent uniqueness.
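    As an illustrative Oracle example (table and index names are made up), the first unique index below remains an eligible key substitute, while the function-based one no longer is:

      CREATE TABLE orders (
        order_number NUMBER NOT NULL,
        region       VARCHAR2(10) NOT NULL,
        total        NUMBER
      );

      -- Plain, user-defined columns: still usable as a primary key substitute
      CREATE UNIQUE INDEX orders_uq ON orders (order_number, region);

      -- Function-based index: no longer considered a primary key alternative
      CREATE UNIQUE INDEX orders_region_uq ON orders (UPPER(region), order_number);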

    Other fixes & improvements

    There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • MongoConnector’s field exclusion configuration does not work with fields with the same name but from different collections DBZ-4846

    • Remove redundant setting of last events DBZ-5047

    • Rename docker-images repository and JIRA component to container-images DBZ-5048

    • Read Debezium Metrics From Debezium Server Consumer DBZ-5235

    • User input are not consistent on Filter step for the DBZ connectors DBZ-5246

    • KafkaDatabaseHistory without check database history topic create result caused UnknowTopicOrPartitionException DBZ-5249

    • Treat SQLServerException with "Broken pipe (Write failed)" exception message as a retriable exception DBZ-5292

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • Caused by: java.io.EOFException: Failed to read next byte from position 2005308603 DBZ-5333

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • Oracle Xstream does not propagate commit timestamp to transaction metadata DBZ-5373

    • UI View connector config in non-first cluster return 404 DBZ-5378

    • CommitScn not logged in expected format DBZ-5381

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Missing "previousId" property with parsing the rename statement in kafka history topic DBZ-5386

    • Check constraint introduces a column based on constraint in the schema change event. DBZ-5390

    • Support storing extended attributes in relational model and JSON schema history topic DBZ-5396

    • The column is referenced as PRIMARY KEY, but a matching column is not defined in table DBZ-5398

    • Clarify which database name to use for signal.data.collection when using Oracle with pluggable database support DBZ-5399

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • Upgrade to Kafka 3.1 broke build compatibility with Kafka 2.x and Kafka 3.0 DBZ-5404

    • Remove the duplicated SimpleDdlParserListener from mysql connector DBZ-5425

    Altogether, a total of 59 issues were fixed for this release.

    What’s Next?

    In these last few months, the team has made some incredible progress on Debezium 2.0, and we can begin to see the finish line in the distance. A large part of this is due to the great work the community has done to contribute changes, provide feedback, and test and help stabilize new features. But we’re not done, so you can continue to expect a 2.0.0.Beta2 release in approximately 3 weeks, sticking with our usual cadence.

    In addition, we do continue to backport changes to the 1.9 branch and will likely look at a 1.9.6.Final release sometime in August to round out that release stream just before we wrap up Debezium 2.0.0.Final.

    So stay cool and safe and happy capturing!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    Debezium 2.0.0.Beta2 Released

      "type": "INCREMENTAL",
      "additional-condition": "product_id=12"
      }
    }

    We believe this new incremental snapshot feature will be tremendously helpful, as it avoids re-snapshotting all rows when only a subset of the data is required.

    Signal collection automatically added to include filters

    In prior releases of Debezium, the signal collection/table used for incremental snapshots had to be manually added to your table.include.list connector property. A big theme in this release was improvements on incremental snapshots, so we’ve taken this opportunity to streamline this as well. Starting in this release, Debezium will automatically add the signal collection/table to the table inclusion filters, avoiding the need for users to manually add it.

    This change does not impose any compatibility issues. Connector configurations that already include the signal collection/table in the table.include.list property will continue to work without requiring any changes. However, if you wish to align your configuration with current behavior, you can also safely remove the signal collection/table from the table.include.list, and Debezium will begin to handle this for you automatically.
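    As a sketch (the schema and table names here are made up), a configuration that previously had to list the signal table explicitly can now omit it:

      # Before: the signaling table had to be included explicitly
      signal.data.collection=inventory.debezium_signal
      table.include.list=inventory.customers,inventory.orders,inventory.debezium_signal

      # Now: Debezium adds the signaling table to the include filters automatically
      signal.data.collection=inventory.debezium_signal
      table.include.list=inventory.customers,inventory.orders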

    Multitasking support for Vitess connector

    The Vitess connector previously allowed operation in two different modes, depending entirely on whether the connector configuration specified any shard details. Unfortunately, both modes resulted in a single task being responsible for performing the VStream processing. For larger Vitess installations with many shards, this architecture could begin to show latency issues, as a single task may not be able to keep up with all the changes across all shards. Even worse, when specifying shard details, the shards across the cluster had to be resolved manually and a separate Debezium connector started per shard, which is error-prone and, more importantly, could result in deploying many Debezium connectors.

    The Vitess community recognized this and sought to find a solution that addresses all these problems, both from a maintenance and error perspective. In Debezium 2.0 Beta2, the Vitess connector now automatically resolves the shards via a discovery mechanism, quite similar to that of MongoDB. This discovery mechanism will then split the load across multiple tasks, allowing for a single deployment of Debezium running a task per shard or shard lists, depending on the maximum number of allowed tasks for the connector.
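    Assuming the task count is bounded by the standard Kafka Connect tasks.max setting (a standard Kafka Connect property, not a new Debezium option), the relevant part of the configuration can be as simple as:

      # Upper bound on how many tasks the discovered shards are distributed across
      tasks.max=4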

    During the upgrade, the Vitess connector will automatically migrate the offset storage to the new format used with the multitasking behavior. But be aware that once you’ve upgraded, you won’t be able to downgrade to an earlier version as the offset storage format will have changed.

    Other fixes & improvements

    There are many bugfixes and stability changes in this release; some noteworthy ones are:

    • Source info of incremental snapshot events exports wrong data DBZ-4329

    • Deprecate internal key/value converter options DBZ-4617

    • "No maximum LSN recorded" log message can be spammed on low-activity databases DBZ-4631

    • Redis Sink config properties are not passed to DB history DBZ-5035

    • Upgrade SQL Server driver to 10.2.1.jre8 DBZ-5290

    • HTTP sink not retrying failing requests DBZ-5307

    • Translation from mongodb document to kafka connect schema fails when nested arrays contain no elements DBZ-5434

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Deprecate legacy topic selector for all connectors DBZ-5457

    • Remove the dependency of JdbcConnection on DatabaseSchema DBZ-5470

    • Missing the regex properties validation before start connector of DefaultRegexTopicNamingStrategy DBZ-5471

    • Create Index DDL fails to parse when using TABLESPACE clause with quoted identifier DBZ-5472

    • Outbox doesn’t check array consistency properly when it determines its schema DBZ-5475

    • Misleading statistics written to the log DBZ-5476

    • Remove SQL Server SourceTimestampMode DBZ-5477

    • Debezium connector task didn’t retry when failover in mongodb 5 DBZ-5479

    • Better error reporting for signal table failures DBZ-5484

    • Oracle DATADUMP DDL cannot be parsed DBZ-5488

    • Upgrade PostgreSQL driver to 42.4.1 DBZ-5493

    • Mysql connector parser the ddl statement failed when including keyword "buckets" DBZ-5499

    • duplicate call to config.validateAndRecord() in RedisDatabaseHistory DBZ-5506

    • DDL statement couldn’t be parsed : mismatched input 'ENGINE' DBZ-5508

    • Use “database.dbnames” in SQL Server docs DBZ-5516

    • LogMiner DML parser incorrectly interprets concatenation operator inside quoted column value DBZ-5521

    • Mysql Connector DDL Parser does not parse all privileges DBZ-5522

    • CREATE TABLE with JSON-based CHECK constraint clause causes MultipleParsingExceptions DBZ-5526

    • Disable preferring DDL before logical schema in history recovery DBZ-5535

    • EmbeddedEngine should initialize Connector using SourceConnectorContext DBZ-5534

    • Support EMPTY column identifier DBZ-5550

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • max.queue.size.in.bytes is invalid DBZ-5569

    • Language type for listings in automatic topic creation DBZ-5573

    • Upgrade mysql-binlog-connector-java library version DBZ-5574

    • Vitess: Handle VStream close unexpectedly DBZ-5579

    • Error when parsing alter sql DBZ-5587

    • Field validation errors are misleading for positive, non-zero expectations DBZ-5588

    • Mysql connector can’t handle the case-sensitive of rename/change column statement DBZ-5589

    • LIST_VALUE_CLAUSE not allowing TIMESTAMP LITERAL DBZ-5592

    • Oracle DDL does not support comments on materialized views DBZ-5595

    • Oracle DDL does not support DEFAULT ON NULL DBZ-5605

    • Datatype mdsys.sdo_geometry not supported DBZ-5609

    Altogether, a total of 107 issues were fixed for this release.

    What’s next?

    With the release of Debezium 2.0 Beta2, we’re in the home stretch toward 2.0.0.Final. The community should expect a CR1 by the end of September and 2.0.0.Final released by the middle of October.

    In addition, our very own Gunnar Morling and I will be guests on the upcoming Quarkus Insights podcast, episode #103. We will be discussing Debezium and Quarkus, how Debezium leverages the power of Quarkus, a virtual how-to on embedding Debezium in a Quarkus-based application, and all the new features in Debezium 2.0. Be sure to check out the podcast and let us know what you think!

    Chris Cranford


    Debezium 1.9.6.Final Released

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users on earlier versions. This release contains 78 resolved issues overall.

    Changes

    A few noteworthy bug fixes and stability improvements include:

    • Oracle SCAN VIP support DBZ-3987

    • Memory leak in EventDeserializer caused by tableMapEventByTableId DBZ-5126

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • OffsetStore not stopped if it fails to fully start DBZ-5433

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Outbox doesn’t check array consistecy properly when it detemines its schema DBZ-5475

    • Debezium connector task didn’t retry when failover in mongodb 5 DBZ-5479

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • Vitess: Handle VStream close unepectedly DBZ-5579

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Message with LSN foo larger than expected LSN bar DBZ-5597

    • Continuously WARNs about undo transactions when LOB is enabled DBZ-5635

    • Large numbers of ROLLBACK transactions can lead to memory leak when LOB is not enabled. DBZ-5645

    In addition, there were quite a number of SQL parser fixes for both MySQL and Oracle, DBZ-5472, DBZ-5488, DBZ-5499, DBZ-5508, DBZ-5521, DBZ-5522, DBZ-5526, DBZ-5550, DBZ-5592, DBZ-5595, DBZ-5605, DBZ-5630, and DBZ-5643.

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Outlook, What’s next?

    Debezium 1.9 will continue to receive bug fix and maintenance changes throughout the early part of the next quarter. I expect there to be at least a 1.9.7.Final in the middle to late October timeframe, potentially wrapping up the 1.9 release stream.

    Debezium 2.0 is wrapping up with the latest 2.0.0.Beta2 build released just last week. We are currently focusing on bug fixes, stability, and polishing the Debezium 2.0 release stream. We expect to have 2.0.0.CR1 released in about another week or so with 2.0.0.Final scheduled for mid-October.

    Until next time, keep an eye out as we’ll soon be discussing what’s to come in Debezium 2.1 later this year!

    Chris Cranford


    GRANT SELECT ON V_$LOGFILE TO c##dbzuser CONTAINER=ALL;
    GRANT SELECT ON V_$ARCHIVED_LOG TO c##dbzuser CONTAINER=ALL;
    GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c##dbzuser CONTAINER=ALL;
    GRANT SELECT ON V_$TRANSACTION TO c##dbzuser CONTAINER=ALL;

    You can refer to the latest documentation to review whether the required grants may have changed. We have created the connector user we will use in the configuration and given the user all the necessary database permissions.
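    For reference, the c##dbzuser account targeted by these grants is created beforehand with a statement along the following lines; the password and tablespace shown here are placeholders rather than the exact values used in this series:

      CREATE USER c##dbzuser IDENTIFIED BY dbz
        DEFAULT TABLESPACE users
        QUOTA UNLIMITED ON users
        CONTAINER=ALL;

      GRANT CREATE SESSION TO c##dbzuser CONTAINER=ALL;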

    Conclusion

    In this part of the series, we have covered what Oracle is and why it is so popular. We’ve also covered installing an Oracle database using a container and configuring the Oracle instance to allow Debezium to ingest changes. In the next part of the series, we’ll dive into deploying the Debezium Oracle connector on Apache Kafka Connect.

    Chris Cranford


      "transaction": null
      }
    }
    ...

    You can now use the SQLPlus terminal where you created the initial test data to INSERT, UPDATE, or DELETE records within the CUSTOMERS table. You will see corresponding change events in the terminal that is presently tailing the server1.C__DBZUSER.CUSTOMERS topic.

    Be mindful that SQLPlus does not enable auto-commit by default, so be sure to commit your changes when you modify data in the CUSTOMERS table so that they are visible to the connector’s mining process.
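    For example, a row added from SQLPlus only becomes visible to the connector once it is committed. The column list below is assumed for illustration; adjust it to match the CUSTOMERS table created earlier in the series:

      INSERT INTO customers (id, first_name, last_name, email)
        VALUES (1005, 'Jane', 'Doe', 'jane.doe@example.com');
      COMMIT;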

    Conclusion

    During part one of this series, we discussed what Oracle is, why it’s so popular in the database world, and how to install and configure the database. In this part of the series, we’ve discussed how to install all the prerequisite services, including Zookeeper, Apache Kafka, and Apache Kafka Connect. In addition, we have also deployed a sample Oracle connector and captured changes for the CUSTOMERS table.

    In the next part of this series, I will discuss performance, how to monitor the connector, and the most critical metrics and why they are essential. We may even build a small dashboard with metrics.

    Chris Cranford


    Debezium 2.0.0.CR1 Released

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    If you intend to upgrade to 2.0.0.CR1, we strongly recommend that you read the release notes before upgrading to understand all breaking changes. There was one noteworthy breaking change in the 2.0.0.CR1 release:

    Behavior of schema.name.adjustment.mode has changed

    The schema.name.adjustment.mode configuration property controls how schema names should be adjusted for compatibility with the message converter used by the connector. This configuration option can be one of two values:

    avro

    Replaces the characters that cannot be used in the Avro type name with an underscore.

    none

    Does not adjust the names, even when non-Avro compliant characters are detected.

    In prior releases, Debezium always defaulted to the safe value of avro; however, starting with Debezium 2.0.0.CR1 the default value is now none. We believe that because the use of Avro serialization is something users opt into based on their needs, this option should follow the same opt-in behavior.

    The safe upgrade path is to adjust your existing configuration to explicitly set schema.name.adjustment.mode to avro and to use the new default only for new connector deployments. Alternatively, you can review your topic names and configurations to confirm that no underscore substitutions are happening, in which case this change will have no impact.
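    In other words, existing deployments that want to preserve the previous behavior would pin the property explicitly:

      schema.name.adjustment.mode=avro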

    MongoDB 6.0 - before state support

    MongoDB 6 supports capturing the state of a document before a change is applied. This has long been a feature available only to the relational connectors, but it now enables Debezium to also include the before field as part of the event’s payload for MongoDB.

    To enable this new MongoDB 6+ behavior, the capture.mode setting has been adjusted to include two new values:

    change_streams_with_pre_image

    The change event will also contain the full document from before the change as well as the final state of the document fields that were changed as a part of the change event.

    change_streams_update_full_with_pre_image

    When an update occurs, not only will the full document be present to represent the current state after the update, but the event will also contain the full document from before the change as well.

    The MongoDB before field behavior is only available on MongoDB 6 or later. If you are using a version of MongoDB before 6.0, the before field is omitted from the event output, even if configured.
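    For example, to receive both the pre-image and the full updated document on MongoDB 6+, the connector would be configured with:

      capture.mode=change_streams_update_full_with_pre_image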

    Other fixes & improvements

    There are many bugfixes and stability changes in this release; some noteworthy ones are:

    • Implement retries for Debezium embedded engine DBZ-4629

    • Traditional snapshot process setting source.ts_ms DBZ-5591

    • Upgrade Kafka client to 3.3.1 DBZ-5600

    • Support READ ONLY/ENCRYPTION options for alter database statment DBZ-5622

    • Clarify semantics of include/exclude options DBZ-5625

    • Added support for Mongo pre-image in change stream DBZ-5628

    • Support for using any expression in kill statements DBZ-5636

    • Debezium Db2 Connector fails to handle default values in schema when is making the snapshot DBZ-4990

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Starting Embedded Engine swallows ClassNotFoundException so user cannot see why engine does not work DBZ-5583

    • Hardcoded driver task properties are not being passed to underlying connections DBZ-5670

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    • Upgrade apicurio to 2.2.5.Final DBZ-5549

    • Upgrade binary log client to 0.27.2 DBZ-5620

    Altogether, a total of 53 issues were fixed for this release.

    What’s next?

    With the release of Debezium 2.0 CR1, the release of 2.0.0.Final is just around the corner. The community should expect the Final release soon, barring any bug reports. In addition, we are also working on wrapping up the last installment of the 1.9 release stream, 1.9.7.Final, which will be released toward the end of this month.

    With the holiday season fast approaching, we will soon begin work on Debezium 2.1. We do intend to have a normal release cycle this quarter despite being behind on Debezium 2.0, so expect that sometime just before the end of the year.

    In the meantime, happy capturing!

    Chris Cranford


    \ No newline at end of file + Debezium 2.0.0.CR1 Released

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    If you intend to upgrade to 2.0.0.CR1, we strongly recommend that you read the release notes before the upgrade to understand all breaking changes. There was one noteworthy breaking changes with the 2.0.0.CR1 release:

    Behavior of schema.name.adjustment.mode has changed

    The schema.name.adjustment.mode configuration property controls how schema names should be adjusted for compatibility with the message converter used by the connector. This configuration option can be one of two values:

    avro

    Repliaces the characters that cannot be used in the Avro type name with an underscore.

    none

    Does not adjust the names, even when non-Avro compliant characters are detected.

    In prior releases, Debezium always defaulted to the safe value of avro; however, starting with Debezium 2.0.0.CR1 the default value will now be none. We believe that given that the use of Avro serialization is something opted in by users based on their needs, this option should align with the same opt-in behavior.

    The safe upgrade path would be to adjust your configuration and explicitly use schema.name.adjustment.mode as avro and use the default for new connector deployments. But you can also review your topic names and configurations, checking that no underscore substitutions are happening and ergo this change will have no impact.

    MongoDB 6.0 - before state support

    MongoDB 6 supports capturing the state of the document before the change is applied. This has long since been a feature that has been available only to the relational-based connectors, but this now enables Debezium to also include the before field as part of the event’s payload for MongoDB.

    To enable this new MongoDB 6+ behavior, the capture.mode setting has been adjusted to include two new values:

    change_streams_with_pre_image

    The change event will also contain the full document from before the change as well as the final state of the document fields that were changed as a part of the change event.

    change_streams_update_full_with_pre_image

    When an update occurs, not only will the full document be present to represent the current state after the update, but the event will also contain the full document from before the change as well.

    The MongoDB before field behavior is only available on MongoDB 6 or later. If you are using a version of MongoDB before 6.0, the before field is omitted from the event output, even if configured.

    Other fixes & improvements

    There are many bugfixes and stability changes in this release, some noteworthy are:

    • Implement retries for Debezium embedded engine DBZ-4629

    • Traditional snapshot process setting source.ts_ms DBZ-5591

    • Upgrade Kafka client to 3.3.1 DBZ-5600

    • Support READ ONLY/ENCRYPTION options for alter database statment DBZ-5622

    • Clarify semantics of include/exclude options DBZ-5625

    • Added support for Mongo pre-image in change stream DBZ-5628

    • Support for using any expression in kill statements DBZ-5636

    • Debezium Db2 Connector fails to handle default values in schema when is making the snapshot DBZ-4990

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Starting Embedded Engine swallows ClassNotFoundException so user cannot see why engine does not work https://issues.redhat.com/browse/DBZ-5583[DBZ-558

    • Hardcoded driver task properties are not being passed to underlying connections DBZ-5670

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    • Upgrade apicurio to 2.2.5.Final DBZ-5549

    • Upgrade binary log client to 0.27.2 DBZ-5620

    Altogether, a total of 53 issues were fixed for this release.

    What’s next?

    With the release of Debezium 2.0 CR1, the release of 2.0.0.Final is just around the corner. The community should expect the Final release soon, barring any bug reports. In addition, we are also working on wrapping up the last installation of the 1.9 release stream, 1.9.7.Final which should will be released toward the end of this month.

    With the holiday season fast approaching, we will soon begin work on Debezium 2.1. We do intend to have a normal release cycle this quarter despite being behind on Debezium 2.0, so expect that sometime just before the end of the year.

    In the meantime, happy capturing!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/10/17/debezium-2-0-final-released/index.html b/blog/2022/10/17/debezium-2-0-final-released/index.html index c359c03967..02c2a04ee6 100644 --- a/blog/2022/10/17/debezium-2-0-final-released/index.html +++ b/blog/2022/10/17/debezium-2-0-final-released/index.html @@ -53,4 +53,4 @@ "commit_scn": "2345678901", "lcr_position": null, "txId": null -}

    \ No newline at end of file +}

    You will notice that the scn field now consists of a comma-separated list of values, where each entry represents a tuple of values. This new tuple has the format of scn:rollback-segment-id:ssn:redo-thread.

    This change is forward compatible, meaning that once you have upgraded to Debezium 2.0, an older version of the connector will be unable to read the offsets. If you do upgrade and decide to roll back, be aware that you will need to manually adjust the offset’s scn field so that it simply contains a string with the most recent scn value across all redo threads.
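
    To illustrate with made-up values, an offset entry before and after the upgrade might look roughly as follows, with one scn:rollback-segment-id:ssn:redo-thread tuple per redo thread:

        Before the upgrade:  "scn": "2345678901"
        After the upgrade:   "scn": "2345678901:4:1:1,2345678902:7:2:2"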

    Oracle commit user in change events

    The source information block of change events carry a variety of context about where the change event originated. In this release, the Oracle connector now includes the user who made the database change in the captured change event. A new field, user_name, can now be found in the source info block with this new information. This field is optional, and is only available when changes are emitted using the LogMiner-based implementation. This field may also contain the value of UNKNOWN if the user associated with a change is dropped prior to the change being captured by the connector.
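
    As a rough, abridged sketch (field values invented for illustration), the source block of an Oracle change event now carries the committing user alongside the existing metadata:

        "source": {
          "connector": "oracle",
          "db": "ORCLCDB",
          "schema": "DEBEZIUM",
          "table": "CUSTOMERS",
          "scn": "2345678901",
          "user_name": "DEBEZIUM"
        }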

    Changes to PostgreSQL connector

    Support for wal2json removed

    Throughout Debezium’s lifecycle, the PostgreSQL connector has supported multiple decoder implementations, including decoderbufs, wal2json, and pgoutput. Both the decoderbufs and wal2json plugins have required special libraries to be installed on the database server to capture changes from PostgreSQL.

    With PostgreSQL 9.6 marked as end of life in November 2021, we felt now was a great opportunity to streamline the number of supported decoders. With PostgreSQL 10 and later supporting the pgoutput decoder natively, we concluded that it made sense to remove support for the wal2json plugin in Debezium 2.0.

    If you are still using PostgreSQL 9.6 or the wal2json decoder, you will need to upgrade to PostgreSQL 10+ and switch to either the decoderbufs or the native pgoutput plugin in order to continue using Debezium going forward.
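
    For those switching off wal2json, this is typically a small configuration change; a minimal sketch (connection and filter settings omitted):

        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "plugin.name": "pgoutput"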

    Changes to Vitess connector

    Multitasking support for Vitess

    The Vitess connector previously allowed operation in two different modes, depending entirely on whether the connector configuration specified any shard details. Unfortunately, both modes resulted in a single task responsible for performing the VStream processing. For larger Vitess installations with many shards, this architecture could begin to show latency issues, as a single task may not be able to keep up with all the changes across all shards. To make matters more complex, specifying the shard details required manually resolving the shards across the cluster and starting a separate Debezium connector per shard, which is error-prone and, more importantly, could result in deploying many Debezium connectors.

    The Vitess community recognized this and sought a solution that addresses all these problems, from both a maintenance and an error-handling perspective. In Debezium 2.0 Beta2, the Vitess connector now automatically resolves the shards via a discovery mechanism, quite similar to that of MongoDB. This discovery mechanism then splits the load across multiple tasks, allowing a single deployment of Debezium to run a task per shard or shard list, depending on the maximum number of allowed tasks for the connector.
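
    Under the new model, the degree of parallelism is governed by the standard Kafka Connect tasks.max setting rather than by running one connector per shard; a minimal sketch (values illustrative):

        "connector.class": "io.debezium.connector.vitess.VitessConnector",
        "tasks.max": "4"

    With a configuration like this, the connector can spread the discovered shards across up to four tasks.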

    During the upgrade, the Vitess connector will automatically migrate the offset storage to the new format used with the multitasking behavior. But be aware that once you’ve upgraded, you won’t be able to downgrade to an earlier version as the offset storage format will have changed.

    Changes for Debezium container images

    Support for ARM64

    There has been a shift in recent years in the performance of ARM64, even at AWS, where their 64-bit ARM processors are projected to outperform the latest x86-64 processors. This has helped put an emphasis across the industry on the cost benefits of supporting both architectures with containers.

    Since Debezium has traditionally released linux/amd64-based container images, this required that you run the images either using emulation or inside a virtual machine. This leads to unnecessary overhead and potential performance concerns, and the goal of Debezium is low latency and hyper speed! Starting with Debezium 2.0, Debezium is now also released with ARM64-based container images, reducing the overhead needed.
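
    In practice, assuming the images are published as multi-arch manifests, the container runtime picks the matching variant automatically; for example (image tag illustrative of the 2.0 release line):

        docker pull quay.io/debezium/connect:2.0

    On an ARM64 host this resolves to the linux/arm64 variant, with no emulation layer involved.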

    We hope the new ARM64 container images improve the adoption of Debezium, and show that we’re committed to delivering the best change data capture experience across the industry universally.

    Community spaces

    Later this week, there will be several new community-driven discussion spaces available on our Zulip chat platform. We will be publishing a blog post that discusses the purpose of these new channels and their goals, but we wanted to also include a note here about this new feature.

    Unlike the #users channel that is meant to provide community-driven support, these spaces are meant to provide a place for the community to discuss experiences with specific database technologies, Debezium services, and topics that are substantially broader than just support. These spaces will be divided by technology, allowing the user community to target specific areas of interest easily, and engage in discussions that pertain to specific databases and services.

    These spaces are not meant to be support venues; we still expect support conversations to continue in the #users channel going forward. So keep an eye out for these new community spaces later this week and for the blog post to follow.

    Other fixes & improvements

    There were many bugfixes, stability changes, and improvements throughout the development of Debezium 2.0. Altogether, a total of 463 issues were fixed for this release.

    A big thank you to all the contributors from the community who worked on this major release: Wang Min Chao, Rotem Adhoh, Ahmed ELJAMI, Alberto Martino, Alexander Schwartz, Alexey Loubyansky, Alexey Miroshnikov, Gabor Andras, Andrew Walker, Andrey Pustovetov, Anisha Mohanty, Avinash Vishwakarma, Bin Huang, Bob Roldan, Brad Morgan, Calin Laurentiu Ilie, Chad Marmon, Chai Stofkoper, Chris Cranford, Chris Lee, Claus Ibsen, Connor Szczepaniak, César Martínez, Debjeet Sarkar, Mikhail Dubrovin, Eliran Agranovich, Ethan Zou, Ezer Karavani, Gabor Andras, Giljae Joo, Gunnar Morling, Hang Ruan, Harvey Yue, Henry Cai, Himanshu Mishra, Hossein Torabi, Inki Hwang, Ismail Simsek, Jakub Cechacek, Jan Doms, Jannik Steinmann, Jaromir Hamala, Jeremy Ford, Jiabao Sun, Jiri Novotny, Jiri Pechanec, Jochen Schalanda, Jun Zhao, Kanha Gupta, Katerina Galieva, Lars Werkman, Marek Winkler, Mark Allanson, Mark Bereznitsky, Martin Medek, Mickael Maison, Mike Kamornikov, Mohammad Yousuf Minhaj Zia, Nathan Bradshaw, Nathan Smit, Naveen Kumar KR, Nils Hartmann, Nir Levy, Nitin Chhabra, Oren Elias, Paul Tzen, Paweł Malon, Pengwei Dou, Phạm Ngọc Thắng, Plugaru Tudor, Oskar Polak, Rahul Khanna, Rajendra Dangwal, René Kerner, Robert Roldan, Ruud H.G. van Tol, Sagar Rao, Sage Pierce, Seo Jae-kwon, Sergei Morozov, Shichao An, Stefan Miklosovic, Tim Patterson, Timo Roeseler, Vadzim Ramanenka, Vivek Wassan, Vojtech Juranek, Xinbin Huang, Yang, Yossi Shirizli, Zhongqiang Gong, moustapha mahfoud, yangrong688, 合龙 张, 崔世杰, and 민규 김!

    What’s next?

    While we are heading into the holiday season, we have started the work on Debezium 2.1, which will be out later this year. Some potential features you can expect include:

    • Truncate support for MySQL

    • PostgreSQL 15 support

    • JDBC history and offset storage support

    As always, this roadmap is heavily influenced by the community, i.e. you. So if you would like to see any particular items here, please let us know. For now, let’s celebrate the hard work that went into the release of Debezium 2.0 and look forward to what’s coming later this year and in 2023!

    Onwards and Upwards!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/10/20/flaky-tests/index.html b/blog/2022/10/20/flaky-tests/index.html index 41f129ef36..aaa94c4b5d 100644 --- a/blog/2022/10/20/flaky-tests/index.html +++ b/blog/2022/10/20/flaky-tests/index.html @@ -8,4 +8,4 @@ Proxy has no Upstream toxics enabled. Downstream toxics: -mysql_latency: type=latency stream=downstream toxicity=1.00 attributes=[ jitter=0 latency=500 ]

    \ No newline at end of file +mysql_latency: type=latency stream=downstream toxicity=1.00 attributes=[ jitter=0 latency=500 ]

    Now, run the test again. Did you observe that the test ran substantially longer? If yes, everything works as expected, as we added latency to every call to the database.

    This is a simple example of adding a toxic to Toxiproxy. Toxiproxy provides many more options and ways to configure toxics. See Toxiproxy for more details.
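
    For reference, the latency toxic listed above can be created with the toxic add subcommand; a sketch along these lines (flag names as per the Toxiproxy CLI documentation):

        toxiproxy-cli toxic add mysql -t latency -n mysql_latency -a latency=500 -a jitter=0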

    Once we are done, we can remove the toxic

    toxiproxy-cli toxic remove mysql -n mysql_latency

    as well as the proxy itself:

    toxiproxy-cli delete mysql

    or simply stop and delete the container.

    Summary

    In this blog post I tried to show a couple of techniques which may help you to simulate flaky test failures locally. All of them try to make the test environment less responsive, namely by limiting CPU or imposing network latencies using Toxiproxy. There are many other reasons why tests can be flaky, in many parts of your application stack, and there are many other tools which can inject various kinds of failures (e.g. disk failures). So this post is by no means exhaustive. But I hope it will help you to debug at least some of the flaky tests, if not in the Debezium project, then at least in your own project.

    All these things, especially Toxiproxy, can also be used on a regular basis, even in CI, to spot various hidden issues in the project which appear only when the environment where it runs doesn’t behave nicely.

    Feel free to share in the discussion any other tips on how to debug flaky tests and what kind of tools you find handy.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/10/26/debezium-1-9-7-final-released/index.html b/blog/2022/10/26/debezium-1-9-7-final-released/index.html index 69b0b93b94..7ad45d3465 100644 --- a/blog/2022/10/26/debezium-1-9-7-final-released/index.html +++ b/blog/2022/10/26/debezium-1-9-7-final-released/index.html @@ -1 +1 @@ - Debezium 1.9.7.Final Released

    \ No newline at end of file + Debezium 1.9.7.Final Released

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability, and is the recommended update for all users on earlier versions. It contains 22 resolved issues overall.

    Changes

    A few noteworthy bug fixes and stability improvements include:

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    • ORA-01289: cannot add duplicate logfile DBZ-5276

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • Missing snapshot pending transactions DBZ-5482

    • Outbox pattern nested payload leads to connector crash DBZ-5654

    • Keyword virtual can be used as an identifier DBZ-5674

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    • Function DATE_ADD can be used as an identifier DBZ-5679

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • MySqlConnector parse create view statement failed DBZ-5708

    • Debezium Server 1.9.6 is using MSSQL JDBC 7.2.2 instead of 9.4.1 DBZ-5711

    • Vitess: Handle Vstream error: unexpected server EOF DBZ-5722

    • ParsingException: DDL statement couldn’t be parsed (index hints) DBZ-5724

    • Oracle SQL parsing error when collation used DBZ-5726

    • Unparseable DDL statement DBZ-5734

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Many thanks to the following individuals from the community who contributed to Debezium 1.9.7.Final: Anisha Mohanty, Bin Huang, Bob Roldan, Chris Cranford, Harvey Yue, Henry Cai, Jakub Cechacek, Jan Werner, Jiri Pechanec, Jochen Schalanda, Nils Hartmann, Phạm Ngọc Thắng, Sage Pierce, Stefan Miklosovic, and Vojtech Juranek!

    Outlook, What’s next?

    This past year has been packed full of changes. This is the eighth and likely final stable release for Debezium 1.9, as we begin to turn our attention fully to Debezium 2.0 moving forward.

    With Debezium 2.0 released on October 17th, just last week, the team is now hard at work addressing your feedback, so keep that coming. We’re also actively working on the next installment of Debezium, 2.1, which will be released later this year. Be sure to keep an eye on our road map in the coming week as we intend to debut what is planned for Debezium 2.1 and what’s to come in 2023!

    Until then, stay safe!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/10/26/debezium-evolving/index.html b/blog/2022/10/26/debezium-evolving/index.html index af862cbd12..cb58f63624 100644 --- a/blog/2022/10/26/debezium-evolving/index.html +++ b/blog/2022/10/26/debezium-evolving/index.html @@ -1 +1 @@ - Debezium Evolving

    \ No newline at end of file + Debezium Evolving

    Some time in early 2017, I got a meeting invite from Debezium’s founder, Randall Hauch. He was about to begin a new chapter in his professional career and was looking for someone to take over as the project lead for Debezium. So we hopped on a call to talk things through, and I was immediately sold on the concept of change data capture, its large number of potential use cases and applications, and the idea of making this available to the community as open-source. After some short consideration I decided to take up this opportunity, and without a doubt this has been one of the best decisions I’ve ever made in my job.

    Today, five years and two major releases (1.0, 2.0) later, I am feeling really proud of what the Debezium community has accomplished, having established itself as the leading open-source platform for change data capture. The number of officially supported databases has grown from three to eight. Further Debezium-based CDC connectors are developed externally by database vendors like ScyllaDB and Yugabyte, making Debezium’s change event format kind of a de-facto standard for CDC. The project is used in production by companies such as Reddit, Shopify, Ubisoft, and Zalando. Debezium became part of Red Hat’s commercially supported product offerings (on-prem, as well as fully managed in the cloud), with multiple other vendors providing Debezium-backed offers as well. During the keynote of this year’s Current conference, Debezium was recognized as one of the most impactful open-source projects in the Apache Kafka space.

    The most important part to me though is the tremendous growth of the Debezium community itself. To this day, more than 450 individuals have contributed to the code base. A big thank you to all the people and organizations who’ve worked tirelessly to make the vision of open-source change data capture a reality and continue to improve it every day: Red Hat — as the project’s main sponsor — Stripe, Instaclustr, SugarCRM, Redis, and many other companies and individual contributors!

    After ten amazing years at Red Hat, I felt that it was about time for a change and to start a new adventure, and I am going to join a start-up in the data streaming space next month. As part of this transition, I am also stepping down from my role as the project lead for Debezium. While I’ll be less active in the project on a daily basis, I definitely plan to stay involved and hopefully still send the occasional pull request.

    My partner in crime Jiri Pechanec will take over as the acting engineering lead. Or, I should say, has taken over, since in fact he has had that role since earlier this year already. Jiri has been a member of the project for many years, working on several key features such as incremental snapshots and MongoDB change streams support. He’s an outstanding software engineer, with a unique insight into the problem space of CDC and decades of experience working in open source, and he will be an amazing lead for the Debezium project and community.

    With the Debezium 2.0 release just through the door, addressing several consistency issues and getting rid of a fair chunk of technical debt, the project is in an excellent position for its future evolution. There are plans for another community-led connector, which should be announced very soon; there’ll be support for exactly-once semantics as recently introduced in Kafka Connect (KIP-618), a Kubernetes operator for Debezium Server, a JDBC sink connector, and much more.

    The best is yet to come, and I can’t wait to see what this amazing community will build next!

    Gunnar Morling

    Gunnar is a software engineer at Decodable and an open-source enthusiast by heart. He has been the project lead of Debezium over many years. Gunnar has created open-source projects like kcctl, JfrUnit, and MapStruct, and is the spec lead for Bean Validation 2.0 (JSR 380). He’s based in Hamburg, Germany.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/11/10/debezium-2-1-alpha1-released/index.html b/blog/2022/11/10/debezium-2-1-alpha1-released/index.html index f55a3eb1cd..fd24131cc3 100644 --- a/blog/2022/11/10/debezium-2-1-alpha1-released/index.html +++ b/blog/2022/11/10/debezium-2-1-alpha1-released/index.html @@ -12,4 +12,4 @@ }, "op": "t", "ts_ms": 1465581029523 -}

    \ No newline at end of file +}

    The most notable point here is that truncate events do not contain a before or after state.
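
    As a rough sketch (source block abridged), a truncate event envelope therefore looks like an ordinary Debezium envelope with op set to "t" and no row state:

        {
          "before": null,
          "after": null,
          "source": { ... },
          "op": "t",
          "ts_ms": 1465581029523
        }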

    New Redis-based storage module

    Debezium recently modularized the parts of its codebase concerned with persisting offsets and schema history into a set of modules supporting file- and Kafka-based implementations. In Debezium 2.1, a new module was introduced to support persisting to Redis data stores.

    The following fully-qualified class names can be used to persist offsets or schema history to Redis data stores:

    • io.debezium.storage.redis.offset.RedisOffsetBackingStore

    • io.debezium.storage.redis.history.RedisSchemaHistory

    If you have manually installed Debezium, be sure to include the debezium-storage-redis artifact on your classpath, if it is not already present, in order to gain access to these new implementations.

    For information about what options can be configured with this new implementation, please see the source configuration section of the Debezium Server documentation and look for configuration options prefixed with:

    • debezium.source.offset.storage.redis.*

    • debezium.source.schema.history.internal.redis.*
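
    Putting this together for Debezium Server, a minimal application.properties sketch could look like the following; the two class settings use the names listed above, while the redis address key is an assumption based on the documented prefix:

        debezium.source.offset.storage=io.debezium.storage.redis.offset.RedisOffsetBackingStore
        debezium.source.schema.history.internal=io.debezium.storage.redis.history.RedisSchemaHistory
        # assumed key under the debezium.source.offset.storage.redis.* prefix
        debezium.source.offset.storage.redis.address=localhost:6379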

    Other fixes

    There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Missing snapshot pending transactions DBZ-5482

    • Using snapshot.mode ALWAYS uses SCN from offsets DBZ-5626

    • MongoDB multiple tasks monitor misalignment DBZ-5629

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • Columns are not excluded when doing incremental snapshots DBZ-5727

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Hostname not available for load balanced ocp services in ARO DBZ-5753

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • Message with LSN 'LSN{XYZ}' not present among LSNs seen in the location phase DBZ-5792

    What’s Next?

    As we continue to work on Debezium 2.1, we’ve been able to include a number of the expected changes in today’s release, but we still intend to deliver a new Single Message Transformation (SMT) for generating change event deltas before the end of the year. There are also some much-anticipated changes for the Debezium UI, such as support for editing connector configurations, and much more.

    You can find this information, along with what else to expect from Debezium in 2023, in our recently updated road map. We have quite a lot of new features planned for next year, and we would love to hear your feedback or suggestions about things you’d like to see that may not be on the roadmap yet. If there are any, be sure to get in touch with us on the mailing list.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/11/15/filling-the-ranks/index.html b/blog/2022/11/15/filling-the-ranks/index.html index 7c5b95c368..ee98ecad87 100644 --- a/blog/2022/11/15/filling-the-ranks/index.html +++ b/blog/2022/11/15/filling-the-ranks/index.html @@ -1 +1 @@ - Filling the Ranks

    \ No newline at end of file + Filling the Ranks

    As you are probably well aware, Gunnar Morling has stepped down from his position as Debezium project lead and is now pursuing new exciting adventures. It is sad, but every cloud has a silver lining!

    What can it be? We (the Debezium team and Red Hat) are hiring! Are you a community contributor? Do you have any pull requests under your belt? Are you a happy Debezium user and eager to do more, or are you a seasoned Java developer looking for work in an exciting and inclusive open-source environment?

    If any of that describes you, don’t hesitate to contact me (Jiri Pechanec <jpechane@redhat.com>) via email or our Zulip chat. I cannot promise you will be selected, but I can promise an open and fair process.

The following is a guideline: things we’d love to see, but not all of them are required. Where something is a firm expectation, it is indicated inline.

    • Multiple years of Java development experience

      • JDBC knowledge expected

• Enterprise Java is not required, but knowledge of integration patterns such as message buses (JMS), routing, etc. is welcome

    • Kafka or Kafka Connect experience is welcome

• At least user-level knowledge of some of the databases supported by Debezium is preferred

  • Knowledge of basic database concepts such as transactions (ACID) and transaction logs is expected

    • Open-source contributions are a plus

    • Debezium contributions are a huge plus

    We can promise engaging and interesting work, an excellent and inclusive team that treats everyone with respect, and a wonderful community that is vibrant and growing daily.

    Don’t be shy, and don’t underestimate yourself. We would rather speak to more people than miss you!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2022/12/22/debezium-2-1-final-released/index.html b/blog/2022/12/22/debezium-2-1-final-released/index.html index 6e8d1c45c8..8fa0873913 100644 --- a/blog/2022/12/22/debezium-2-1-final-released/index.html +++ b/blog/2022/12/22/debezium-2-1-final-released/index.html @@ -1 +1 @@ - Debezium 2.1.0.Final/Debezium 2.1.1.Final Released

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have noticed that Debezium has gone a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google have been working furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

Release 2.1.0.Final was missing a mandatory dependency. This is fixed in the 2.1.1.Final hotfix update.

After plenty of intensive effort, we would like to introduce Nancy Xu, the leading engineer behind the implementation of the Debezium Spanner connector for Google’s Cloud Spanner distributed database. The connector itself is in an incubating state and is not yet fully feature complete (for example, initial snapshots are not supported yet). Still, it is ready for general use in scenarios where a robust Spanner-to-Kafka streaming implementation is required.

    The initial release provides

As exciting as this news is, it is not the only new feature coming to Debezium. The release brings a nice pack of additional improvements.

• The Vitess connector supports initial snapshotting. This is a completely new feature; the default behaviour for a new Vitess connector instance is now to snapshot the current table content and then switch to streaming.

• Starting with Debezium 2.0, we extracted a set of interfaces to provide additional pluggable persistent stores. The Redis offset and internal schema history stores were converted into a module and are now available for generic use.

• The MySQL connector now processes TRUNCATE TABLE commands. When one is detected, a truncate (t) message is emitted into the table’s topic. This feature is optional and is disabled by default.

• Kafka Connect provides so-called predicates that enable users to apply transformations conditionally. Debezium Engine and Debezium Server support the same functionality, configured in the same way as in Kafka Connect.

    • PostgreSQL connector is compatible with PostgreSQL 15.

• The Cassandra connector has, from the very start, been a bit of an odd duckling in how its codebase is written and how the connector is deployed. This meant that only Kafka was supported as a destination. The connector was rewritten so that it can now run inside Debezium Server, and therefore any supported sink can be used as the destination.

• NATS JetStream is a new sink provided by Debezium Server.

• Kafka Connect by default calculates the topic partition number based on the message’s primary key. With the new ComputePartition transformation, it is possible to define a list of per-table columns used to explicitly calculate and set the partition number.

• The PostgreSQL connector flushes the LSN (and thus allows the WAL to be truncated) once a message is recorded in Kafka. For scenarios that prefer manual WAL management, it is possible to disable this behaviour.

• The MongoDB connector used to always connect to and stream from the primary node in the cluster. This is no longer necessary, and non-primary nodes are now preferred.

    Other fixes & improvements

    There were many bugfixes, stability changes, and improvements throughout the development of Debezium 2.1. Altogether, a total of 149 issues were fixed for this release.

    What’s next?

    So what are our current plans for the first quarter of the next year? Some potential features you can expect include:

    • Initial work on JDBC sink connector

    • Configurable signalling channels

    • JDBC and S3 history and offset storage support

    As always, this roadmap is heavily influenced by the community, i.e. you. So if you would like to see any particular items here, please let us know.

    Merry Christmas and Happy New Year 2023!

    Onwards and Upwards!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.1.0.Final/Debezium 2.1.1.Final Released

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have noticed that Debezium has gone a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google have been working furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

Release 2.1.0.Final was missing a mandatory dependency. This is fixed in the 2.1.1.Final hotfix update.

After plenty of intensive effort, we would like to introduce Nancy Xu, the leading engineer behind the implementation of the Debezium Spanner connector for Google’s Cloud Spanner distributed database. The connector itself is in an incubating state and is not yet fully feature complete (for example, initial snapshots are not supported yet). Still, it is ready for general use in scenarios where a robust Spanner-to-Kafka streaming implementation is required.

    The initial release provides

As exciting as this news is, it is not the only new feature coming to Debezium. The release brings a nice pack of additional improvements.

• The Vitess connector supports initial snapshotting. This is a completely new feature; the default behaviour for a new Vitess connector instance is now to snapshot the current table content and then switch to streaming.

• Starting with Debezium 2.0, we extracted a set of interfaces to provide additional pluggable persistent stores. The Redis offset and internal schema history stores were converted into a module and are now available for generic use.

• The MySQL connector now processes TRUNCATE TABLE commands. When one is detected, a truncate (t) message is emitted into the table’s topic. This feature is optional and is disabled by default.

• Kafka Connect provides so-called predicates that enable users to apply transformations conditionally. Debezium Engine and Debezium Server support the same functionality, configured in the same way as in Kafka Connect (see the configuration sketch after this list).

    • PostgreSQL connector is compatible with PostgreSQL 15.

• The Cassandra connector has, from the very start, been a bit of an odd duckling in how its codebase is written and how the connector is deployed. This meant that only Kafka was supported as a destination. The connector was rewritten so that it can now run inside Debezium Server, and therefore any supported sink can be used as the destination.

• NATS JetStream is a new sink provided by Debezium Server.

• Kafka Connect by default calculates the topic partition number based on the message’s primary key. With the new ComputePartition transformation, it is possible to define a list of per-table columns used to explicitly calculate and set the partition number.

• The PostgreSQL connector flushes the LSN (and thus allows the WAL to be truncated) once a message is recorded in Kafka. For scenarios that prefer manual WAL management, it is possible to disable this behaviour (see the sketch after this list).

• The MongoDB connector used to always connect to and stream from the primary node in the cluster. This is no longer necessary, and non-primary nodes are now preferred.
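To make the predicate and WAL-management items above more concrete, here is a minimal, hypothetical Debezium Server configuration sketch. The transformation name, predicate name, and topic pattern are invented for illustration, and the debezium.* property prefixes follow the usual Debezium Server conventions; please check the documentation for the authoritative option names.

# Apply the ExtractNewRecordState transformation only to topics matching a regular expression
debezium.transforms=unwrap
debezium.transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
debezium.transforms.unwrap.predicate=onlyCustomers
debezium.predicates=onlyCustomers
debezium.predicates.onlyCustomers.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
debezium.predicates.onlyCustomers.pattern=.*customers

# Keep WAL management manual by disabling the automatic LSN flush of the
# PostgreSQL source connector (assumes the flush.lsn.source connector option)
debezium.source.flush.lsn.source=false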

    Other fixes & improvements

    There were many bugfixes, stability changes, and improvements throughout the development of Debezium 2.1. Altogether, a total of 149 issues were fixed for this release.

    What’s next?

    So what are our current plans for the first quarter of the next year? Some potential features you can expect include:

    • Initial work on JDBC sink connector

    • Configurable signalling channels

    • JDBC and S3 history and offset storage support

    As always, this roadmap is heavily influenced by the community, i.e. you. So if you would like to see any particular items here, please let us know.

    Merry Christmas and Happy New Year 2023!

    Onwards and Upwards!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/01/06/change-data-capture-with-questdb-and-debezium/index.html b/blog/2023/01/06/change-data-capture-with-questdb-and-debezium/index.html index b85eda4688..c05e47fc56 100644 --- a/blog/2023/01/06/change-data-capture-with-questdb-and-debezium/index.html +++ b/blog/2023/01/06/change-data-capture-with-questdb-and-debezium/index.html @@ -125,4 +125,4 @@ WHERE $__timeFilter(timestamp) and symbol = '$Symbol' -SAMPLE BY $Interval ALIGN TO CALENDAR;

    We have created a system that continuously tracks and stores the latest prices for multiple stocks in a PostgreSQL table. These prices are then fed as events to Kafka through Debezium, which captures every price change. The QuestDB Kafka connector reads these events from Kafka and stores each change as a new row in QuestDB, allowing us to retain a comprehensive history of stock prices. This history can then be analyzed and visualized using tools such as Grafana, as demonstrated by the candle chart.

    Next steps

    This sample project is a foundational reference architecture to stream data from a relational database into an optimized time series database. For existing projects that are using PostgreSQL, Debezium can be configured to start streaming data to QuestDB and take advantage of time series queries and partitioning. For databases that are also storing raw historical data, adopting Debezium may need some architectural changes. However, this is beneficial as it is an opportunity to improve performance and establish service boundaries between a transactional database and an analytical, time-series database.

This reference architecture can also be extended by configuring Kafka Connect to stream to other data warehouses for long-term storage. After inspecting the data, QuestDB can also be configured to downsample it for longer-term storage, or even to detach partitions to save space.

    Give this sample application a try and join the QuestDB Slack community if you have any questions.

    Yitaek Hwang

    Yitaek is a Software Engineer at NYDIG and a guest contributor to QuestDB.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +SAMPLE BY $Interval ALIGN TO CALENDAR;

    We have created a system that continuously tracks and stores the latest prices for multiple stocks in a PostgreSQL table. These prices are then fed as events to Kafka through Debezium, which captures every price change. The QuestDB Kafka connector reads these events from Kafka and stores each change as a new row in QuestDB, allowing us to retain a comprehensive history of stock prices. This history can then be analyzed and visualized using tools such as Grafana, as demonstrated by the candle chart.

    Next steps

    This sample project is a foundational reference architecture to stream data from a relational database into an optimized time series database. For existing projects that are using PostgreSQL, Debezium can be configured to start streaming data to QuestDB and take advantage of time series queries and partitioning. For databases that are also storing raw historical data, adopting Debezium may need some architectural changes. However, this is beneficial as it is an opportunity to improve performance and establish service boundaries between a transactional database and an analytical, time-series database.

This reference architecture can also be extended by configuring Kafka Connect to stream to other data warehouses for long-term storage. After inspecting the data, QuestDB can also be configured to downsample it for longer-term storage, or even to detach partitions to save space.

    Give this sample application a try and join the QuestDB Slack community if you have any questions.

    Yitaek Hwang

    Yitaek is a Software Engineer at NYDIG and a guest contributor to QuestDB.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/01/19/debezium-2-2-alpha1-released/index.html b/blog/2023/01/19/debezium-2-2-alpha1-released/index.html index dad3daf26f..fac8a0e209 100644 --- a/blog/2023/01/19/debezium-2-2-alpha1-released/index.html +++ b/blog/2023/01/19/debezium-2-2-alpha1-released/index.html @@ -17,4 +17,4 @@ debezium.sink.infinispan.server.port=<port> debezium.sink.infinispan.cache=<cache-name> debezium.sink.infinispan.user=<user> -debezium.sink.infinispan.password=<password>

    The above configuration specifies that the sink type to be used is infinispan, which enables the use of the Infinispan module. The following is a description of each of the properties shown above:

    debezium.sink.infinispan.server.host

Specifies the host name of one of the servers in the Infinispan cluster. This configuration option can also accept a comma-separated list of hostnames, such as hostname1,hostname2.

    debezium.sink.infinispan.server.port

    Specifies the port of the Infinispan cluster. Defaults to 11222.

    debezium.sink.infinispan.cache

    Specifies the name of the Infinispan cache to write change events.

The Infinispan sink requires that the cache be created manually ahead of time. This allows the cache to be created with whatever configuration is needed to fit your requirements.

    debezium.sink.infinispan.user

    An optional configuration to specify the user to authenticate with, if authentication is required.

    debezium.sink.infinispan.password

    An optional configuration to specify the password for the authenticating user, if authentication is required.

    For more information on using Debezium Server with Infinispan, see the documentation.

    Other fixes

There were quite a number of bug fixes and stability changes in this release; some noteworthy ones are:

    • Remove option for specifying driver class from MySQL Connector DBZ-4663

    • Debezium is not working with Apicurio and custom truststores DBZ-5282

    • Show/Hide password does not work on Connectors View details screen DBZ-5322

    • Oracle cannot undo change DBZ-5907

    • Postgresql Data Loss on restarts DBZ-5915

    • Add support for Connect Headers to Debezium Server DBZ-5926

    • Oracle Multithreading lost data DBZ-5945

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • Table size log message for snapshot.select.statement.overrides tables not correct DBZ-5985

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Log statement for unparseable DDL statement in MySqlDatabaseSchema contains placeholder DBZ-5993

    • Postgresql connector parses the null of the money type into 0 DBZ-6001

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    What’s Next?

As the road to Debezium 2.2 is just starting, this initial release already covers quite a lot of the features we outlined in our recent 2023 road map update. However, a number of features are still in active development, including:

    • Configurable signal channels, enabling users to send signals not only from a database table or a Kafka topic, but also from other means such as an HTTP endpoint, the file system, etc.

    • The Debezium JDBC sink connector that supports native Debezium change events out-of-the-box, without requiring the use of the Event Flattening transformation.

• A new single message transformation, ExtractChangedRecordState, that supports adding headers to the emitted event describing which fields were changed or unchanged by the source event.

    • And a plethora of enhancements to Debezium’s UI

As we continue development on Debezium 2.2 and bug fixes for Debezium 2.1, we would love to hear your feedback or suggestions, whether regarding our road map, the changes in this release, or something you’d like to see that we haven’t mentioned. If so, be sure to get in touch with us on the mailing list or our chat. Or if you just want to stop by and say "Hello", we’d welcome that too.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +debezium.sink.infinispan.password=<password>

    The above configuration specifies that the sink type to be used is infinispan, which enables the use of the Infinispan module. The following is a description of each of the properties shown above:

    debezium.sink.infinispan.server.host

Specifies the host name of one of the servers in the Infinispan cluster. This configuration option can also accept a comma-separated list of hostnames, such as hostname1,hostname2.

    debezium.sink.infinispan.server.port

    Specifies the port of the Infinispan cluster. Defaults to 11222.

    debezium.sink.infinispan.cache

    Specifies the name of the Infinispan cache to write change events.

The Infinispan sink requires that the cache be created manually ahead of time. This allows the cache to be created with whatever configuration is needed to fit your requirements.

    debezium.sink.infinispan.user

    An optional configuration to specify the user to authenticate with, if authentication is required.

    debezium.sink.infinispan.password

    An optional configuration to specify the password for the authenticating user, if authentication is required.

    For more information on using Debezium Server with Infinispan, see the documentation.
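To illustrate the properties described above, a Debezium Server configuration pointing at a two-node Infinispan cluster with authentication enabled might look roughly like the following sketch; the host names, cache name, and credentials are placeholder values.

debezium.sink.type=infinispan
# Comma-separated server list; 11222 is the default port
debezium.sink.infinispan.server.host=infinispan-1.example.com,infinispan-2.example.com
debezium.sink.infinispan.server.port=11222
# The cache must already exist; it is not created by the sink
debezium.sink.infinispan.cache=debezium-events
debezium.sink.infinispan.user=debezium
debezium.sink.infinispan.password=secret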

    Other fixes

There were quite a number of bug fixes and stability changes in this release; some noteworthy ones are:

    • Remove option for specifying driver class from MySQL Connector DBZ-4663

    • Debezium is not working with Apicurio and custom truststores DBZ-5282

    • Show/Hide password does not work on Connectors View details screen DBZ-5322

    • Oracle cannot undo change DBZ-5907

    • Postgresql Data Loss on restarts DBZ-5915

    • Add support for Connect Headers to Debezium Server DBZ-5926

    • Oracle Multithreading lost data DBZ-5945

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • Table size log message for snapshot.select.statement.overrides tables not correct DBZ-5985

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Log statement for unparseable DDL statement in MySqlDatabaseSchema contains placeholder DBZ-5993

    • Postgresql connector parses the null of the money type into 0 DBZ-6001

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    What’s Next?

As the road to Debezium 2.2 is just starting, this initial release already covers quite a lot of the features we outlined in our recent 2023 road map update. However, a number of features are still in active development, including:

    • Configurable signal channels, enabling users to send signals not only from a database table or a Kafka topic, but also from other means such as an HTTP endpoint, the file system, etc.

    • The Debezium JDBC sink connector that supports native Debezium change events out-of-the-box, without requiring the use of the Event Flattening transformation.

• A new single message transformation, ExtractChangedRecordState, that supports adding headers to the emitted event describing which fields were changed or unchanged by the source event.

    • And a plethora of enhancements to Debezium’s UI

As we continue development on Debezium 2.2 and bug fixes for Debezium 2.1, we would love to hear your feedback or suggestions, whether regarding our road map, the changes in this release, or something you’d like to see that we haven’t mentioned. If so, be sure to get in touch with us on the mailing list or our chat. Or if you just want to stop by and say "Hello", we’d welcome that too.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/01/24/we-are-hiring-2/index.html b/blog/2023/01/24/we-are-hiring-2/index.html index aa60db3969..5a9b04f0f0 100644 --- a/blog/2023/01/24/we-are-hiring-2/index.html +++ b/blog/2023/01/24/we-are-hiring-2/index.html @@ -1 +1 @@ - We Are Hiring (Saga continues)

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

The good news is that we will have a new pair of hands joining the team soon. It is still too early to share more details, but we are happy to welcome the newcomer to the team!

How about the better news? Well, we can take on one more person! The process has changed a bit since last time, but everything else still holds. So if you are interested, please apply via the Red Hat job portal.

    Don’t be shy, and don’t underestimate yourself. We would rather speak to more people than miss you!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + We Are Hiring (Saga continues)

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

The good news is that we will have a new pair of hands joining the team soon. It is still too early to share more details, but we are happy to welcome the newcomer to the team!

How about the better news? Well, we can take on one more person! The process has changed a bit since last time, but everything else still holds. So if you are interested, please apply via the Red Hat job portal.

    Don’t be shy, and don’t underestimate yourself. We would rather speak to more people than miss you!

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/01/26/debezium-2-1-2-final-released/index.html b/blog/2023/01/26/debezium-2-1-2-final-released/index.html index 9bf7b3c7c9..bb229826dc 100644 --- a/blog/2023/01/26/debezium-2-1-2-final-released/index.html +++ b/blog/2023/01/26/debezium-2-1-2-final-released/index.html @@ -1 +1 @@ - Debezium 2.1.2.Final Released

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users on earlier versions. It contains 28 resolved issues, so let’s take a moment to discuss a critical breaking change.

    Breaking Change

An edge case was reported in DBZ-5996: if a temporal column used ZonedTimestamp and the column’s value had 0 micro- or nanoseconds, then rather than emitting the value as 2023-01-19T12:30:00.123000Z, the value would be emitted in a truncated form as 2023-01-19T12:30:00.123Z. This could lead to issues with converters used in the event pipeline, as the output for that column could be formatted inconsistently.

In order to remedy this edge case, the ZonedTimestamp implementation now pads the fractional seconds of the column’s value to the length/scale of the source database column. Using the example above with a MySQL TIMESTAMP(6) column, the emitted value will now properly be 2023-01-19T12:30:00.123000Z.

While this change in behavior is likely to have minimal impact on most users, we wanted to bring attention to it in case you have used other means to handle this edge case in your pipelines. If you have, you should now be able to rely on Debezium to emit the value consistently, even when the fractional seconds are 0.

    Other changes

    A few noteworthy bug fixes and stability improvements include:

    • Data type conversion failed for mysql bigint DBZ-5798

    • Oracle cannot undo change DBZ-5907

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Many thanks to the following individuals from the community who contributed to Debezium 2.1.2.Final: Akshansh Jain, Animesh Kumar, Anisha Mohanty, Bob Roldan, Chris Cranford, Harvey Yue, Henry Cai, Indra Shukla, Jiri Pechanec, Luca Scannapieco, Mario Fiore Vitale, Mark Lambert, Sergei Morozov, Vojtech Juranek, Yohei Yoshimuta, and yohei yoshimuta!

    Outlook, What’s next?

Debezium 2.1 will continue to receive bug fix and maintenance changes throughout this quarter. I expect there will be at least one additional release, likely in the mid-to-late February or March timeframe, as we begin to wrap up the work on Debezium 2.2.

    Regarding Debezium 2.2, we intend to deliver another Alpha build in the coming weeks. We have lots of features still in the works, including the JDBC Sink Connector, configurable signal channels, new message transformations, and much more.

    Stay tuned, and until then …​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.1.2.Final Released

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users on earlier versions. It contains 28 resolved issues, so let’s take a moment to discuss a critical breaking change.

    Breaking Change

An edge case was reported in DBZ-5996: if a temporal column used ZonedTimestamp and the column’s value had 0 micro- or nanoseconds, then rather than emitting the value as 2023-01-19T12:30:00.123000Z, the value would be emitted in a truncated form as 2023-01-19T12:30:00.123Z. This could lead to issues with converters used in the event pipeline, as the output for that column could be formatted inconsistently.

In order to remedy this edge case, the ZonedTimestamp implementation now pads the fractional seconds of the column’s value to the length/scale of the source database column. Using the example above with a MySQL TIMESTAMP(6) column, the emitted value will now properly be 2023-01-19T12:30:00.123000Z.

While this change in behavior is likely to have minimal impact on most users, we wanted to bring attention to it in case you have used other means to handle this edge case in your pipelines. If you have, you should now be able to rely on Debezium to emit the value consistently, even when the fractional seconds are 0.

    Other changes

    A few noteworthy bug fixes and stability improvements include:

    • Data type conversion failed for mysql bigint DBZ-5798

    • Oracle cannot undo change DBZ-5907

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Many thanks to the following individuals from the community who contributed to Debezium 2.1.2.Final: Akshansh Jain, Animesh Kumar, Anisha Mohanty, Bob Roldan, Chris Cranford, Harvey Yue, Henry Cai, Indra Shukla, Jiri Pechanec, Luca Scannapieco, Mario Fiore Vitale, Mark Lambert, Sergei Morozov, Vojtech Juranek, Yohei Yoshimuta, and yohei yoshimuta!

    Outlook, What’s next?

Debezium 2.1 will continue to receive bug fix and maintenance changes throughout this quarter. I expect there will be at least one additional release, likely in the mid-to-late February or March timeframe, as we begin to wrap up the work on Debezium 2.2.

    Regarding Debezium 2.2, we intend to deliver another Alpha build in the coming weeks. We have lots of features still in the works, including the JDBC Sink Connector, configurable signal channels, new message transformations, and much more.

    Stay tuned, and until then …​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/02/04/ddd-aggregates-via-cdc-cqrs-pipeline-using-kafka-and-debezium/index.html b/blog/2023/02/04/ddd-aggregates-via-cdc-cqrs-pipeline-using-kafka-and-debezium/index.html index 0492082d34..c62bb3c9aa 100644 --- a/blog/2023/02/04/ddd-aggregates-via-cdc-cqrs-pipeline-using-kafka-and-debezium/index.html +++ b/blog/2023/02/04/ddd-aggregates-via-cdc-cqrs-pipeline-using-kafka-and-debezium/index.html @@ -64,4 +64,4 @@ "transforms.hv.type": "org.apache.kafka.connect.transforms.HoistField$Value", "transforms.hv.field": "order" } -}

    Query Database: MongoDB

The DDD aggregate is written to the database order_db in the collection order on MongoDB. The order-id becomes the _id of the document, and the order field stores the order aggregate as JSON.

    REST Application: order-read-service

    The Order Aggregate persisted in MongoDB is served via a REST endpoint in order-read-service.

    • GET: api/order/{order-id} to retrieve the order from the MongoDB database

    Execution Instructions

The complete source code for this blog post is provided here on GitHub. Begin by cloning this repository and changing into the cdc-cqrs-pipeline directory. The project provides a Docker Compose file with services for all the components:

    • MySQL

    • Adminer (formerly known as phpMinAdmin), to manage MySQL via browser

    • MongoDB

    • Mongo Express, to manage MongoDB via browser

    • Zookeeper

    • Confluent Kafka

    • Kafka Connect

Once all services have started, register an instance of the Debezium MySQL connector and of the MongoDB sink connector by executing the Create-MySQL-Debezium-Connector and Create-MongoDB-Sink-Connector requests, respectively, from cdc-cqrs-pipeline.postman_collection.json. Execute the Get-All-Connectors request to verify that the connectors have been created properly.

Change into the individual directories and spin up the three Spring Boot applications:

    • order-write-service: runs on port no 8070

    • order-aggregation-service: runs on port no 8071

    • order-read-service: runs on port no 8072

    With this, our setup is complete.

    To test the application, execute the request Post-Shipping-Details from the postman collection to insert shipping-details and Post-Item-Details to insert item-details for a particular order id.

    Finally, execute the Get-Order-By-Order-Id request in the postman collection to retrieve the complete Order Aggregate.

    Summary

    Apache Kafka acts as a highly scalable and reliable backbone for the messaging amongst the services. Putting Apache Kafka into the center of the overall architecture also ensures a decoupling of involved services. If for instance single components of the solution fail or are not available for some time, events will simply be processed later on: after a restart, the Debezium connector will continue to tail the relevant tables from the point where it left off before. Similarly, any consumer will continue to process topics from its previous offset. By keeping track of already successfully processed messages, duplicates can be detected and excluded from repeated handling.

Naturally, such an event pipeline between different services is eventually consistent, i.e. consumers such as the order-read-service may lag a bit behind producers such as the order-write-service. Usually that’s just fine, though, and can be handled by the application’s business logic. Also, end-to-end delays of the overall solution are typically low (seconds or even sub-second range), thanks to log-based change data capture, which allows events to be emitted in near real-time.

    Purnima Jain

    Purnima is a Senior Software Architect with extensive industry experience and has been working on software-development projects in various Banking & Financial organizations.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    Query Database: MongoDB

The DDD aggregate is written to the database order_db in the collection order on MongoDB. The order-id becomes the _id of the document, and the order field stores the order aggregate as JSON.
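For illustration only, a resulting document in the order collection might look roughly like the following; the order id and the aggregate’s fields (shipping and item details) are invented for this sketch, and the actual shape depends on what was posted to the write side.

{
  "_id": "order-1001",
  "order": {
    "orderId": "order-1001",
    "shippingDetails": { "city": "Brno", "zipCode": "60200" },
    "itemDetails": [ { "itemId": "item-1", "quantity": 2 } ]
  }
}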

    REST Application: order-read-service

    The Order Aggregate persisted in MongoDB is served via a REST endpoint in order-read-service.

    • GET: api/order/{order-id} to retrieve the order from the MongoDB database

    Execution Instructions

The complete source code for this blog post is provided here on GitHub. Begin by cloning this repository and changing into the cdc-cqrs-pipeline directory. The project provides a Docker Compose file with services for all the components:

    • MySQL

    • Adminer (formerly known as phpMinAdmin), to manage MySQL via browser

    • MongoDB

    • Mongo Express, to manage MongoDB via browser

    • Zookeeper

    • Confluent Kafka

    • Kafka Connect

Once all services have started, register an instance of the Debezium MySQL connector and of the MongoDB sink connector by executing the Create-MySQL-Debezium-Connector and Create-MongoDB-Sink-Connector requests, respectively, from cdc-cqrs-pipeline.postman_collection.json. Execute the Get-All-Connectors request to verify that the connectors have been created properly.

Change into the individual directories and spin up the three Spring Boot applications:

    • order-write-service: runs on port no 8070

    • order-aggregation-service: runs on port no 8071

    • order-read-service: runs on port no 8072

    With this, our setup is complete.

    To test the application, execute the request Post-Shipping-Details from the postman collection to insert shipping-details and Post-Item-Details to insert item-details for a particular order id.

    Finally, execute the Get-Order-By-Order-Id request in the postman collection to retrieve the complete Order Aggregate.

    Summary

    Apache Kafka acts as a highly scalable and reliable backbone for the messaging amongst the services. Putting Apache Kafka into the center of the overall architecture also ensures a decoupling of involved services. If for instance single components of the solution fail or are not available for some time, events will simply be processed later on: after a restart, the Debezium connector will continue to tail the relevant tables from the point where it left off before. Similarly, any consumer will continue to process topics from its previous offset. By keeping track of already successfully processed messages, duplicates can be detected and excluded from repeated handling.

Naturally, such an event pipeline between different services is eventually consistent, i.e. consumers such as the order-read-service may lag a bit behind producers such as the order-write-service. Usually that’s just fine, though, and can be handled by the application’s business logic. Also, end-to-end delays of the overall solution are typically low (seconds or even sub-second range), thanks to log-based change data capture, which allows events to be emitted in near real-time.

    Purnima Jain

    Purnima is a Senior Software Architect with extensive industry experience and has been working on software-development projects in various Banking & Financial organizations.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/02/16/debezium-2-2-alpha2-released/index.html b/blog/2023/02/16/debezium-2-2-alpha2-released/index.html index 5b8da1f6ea..9dc91c489a 100644 --- a/blog/2023/02/16/debezium-2-2-alpha2-released/index.html +++ b/blog/2023/02/16/debezium-2-2-alpha2-released/index.html @@ -16,4 +16,4 @@ schema.history.internal.rocketmq.secret.key=<rocketmq-secret-key> schema.history.internal.rocketmq.recovery.attempts=5 schema.history.internal.rocketmq.recovery.poll.interval.ms=1000 -schema.history.internal.rocketmq.store.record.timeout.ms=2000
    schema.history.internal.rocketmq.topic

    Specifies the topic name where the schema history will be stored.

    schema.history.internal.rocketmq.name.srv.addr

Specifies the RocketMQ name server address used for service discovery.

schema.history.internal.rocketmq.acl.enabled

Specifies whether access control lists (ACLs) are enabled; defaults to false.

schema.history.internal.rocketmq.access.key

Specifies the RocketMQ access key; required only if ACLs are enabled.

schema.history.internal.rocketmq.secret.key

Specifies the RocketMQ secret key; required only if ACLs are enabled.

schema.history.internal.rocketmq.recovery.attempts

Specifies the number of consecutive poll attempts that may return no data before history recovery is considered complete.

schema.history.internal.rocketmq.recovery.poll.interval.ms

Specifies the interval, in milliseconds, between poll attempts while recovering the history.

schema.history.internal.rocketmq.store.record.timeout.ms

Specifies the number of milliseconds allowed for a write to RocketMQ to complete before timing out.

    Other fixes

    There were quite a number of other improvements, bug fixes, and stability changes in this release, some noteworthy are:

    • Better control on debezium GTID usage DBZ-2296

    • Data type conversion failed for mysql bigint DBZ-5798

    • ActivateTracingSpan wrong timestamps reported DBZ-5827

    • Unable to specify column or table include list if name contains a backslash \ DBZ-5917

    • debezium-connector-cassandra 2.1.0.Alpha2 plugin can no longer run "out of the box" DBZ-5925

    • MongoDB Incremental Snapshot not Working DBZ-5973

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Upgrade to Quarkus 2.16.0.Final DBZ-6005

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • debezium-server Pulsar support non-default tenant and namespace DBZ-6033

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    • Vitess: Support Mapping unsigned bigint mysql column type to long DBZ-6043

    • Incremental snapshot sends the events from signalling DB to Kafka DBZ-6051

    • Upgrade Kafka to 3.3.2 DBZ-6054

    • Mask password in log statement DBZ-6064

    • Loading Custom offset storage fails with Class not found error DBZ-6075

    • Increase query.fetch.size default to something sensible above zero DBZ-6079

    • SQL Server tasks fail if the number of databases is smaller than maxTasks DBZ-6084

    • When using LOB support, an UPDATE against multiple rows can lead to inconsistent event data DBZ-6107

    • Expose sequence field in CloudEvents message id DBZ-6089

    • Reduce verbosity of skipped transactions if transaction has no events relevant to captured tables DBZ-6094

    • Upgrade Kafka client to 3.4.0 DBZ-6102

    What’s Next?

    We’re still very early in the development cycle of Debezium 2.2 and many other features are still in development, including:

    • Configurable signal channels, enabling users to send signals not only from a database table or a Kafka topic, but also from other means such as an HTTP endpoint, the file system, etc.

    • The Debezium JDBC sink connector that supports native Debezium change events out-of-the-box, without requiring the use of the Event Flattening transformation.

    • And a plethora of Debezium UI enhancements

We are about midway through the quarter, and Debezium 2.2 will begin to enter its beta phase very soon. We would love to hear your feedback or suggestions regarding the roadmap, changes in this release, those that are outstanding, or anything we haven’t mentioned. If you have any, be sure to get in touch with us on the mailing list or our chat.

Also be on the lookout for the first installment of our 2023 Newsletter, as well as the upcoming conclusion of the blog series, "Debezium for Oracle", where I cover performance, debugging, and frequently asked questions about the Oracle connector.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +schema.history.internal.rocketmq.store.record.timeout.ms=2000

    \ No newline at end of file diff --git a/blog/2023/03/08/debezium-2-2-alpha3-released/index.html b/blog/2023/03/08/debezium-2-2-alpha3-released/index.html index 89560dc68b..1ceba8dcdd 100644 --- a/blog/2023/03/08/debezium-2-2-alpha3-released/index.html +++ b/blog/2023/03/08/debezium-2-2-alpha3-released/index.html @@ -1,4 +1,4 @@ Debezium 2.2.0.Alpha3 Released

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Breaking Changes

    We typically try to avoid any breaking changes, even during minor releases such as this; however, sometimes breaking changes are inevitable given the circumstances. Debezium 2.2.0.Alpha3 includes one breaking change:

    PostgreSQL zoned date-time data types truncated

It was identified (DBZ-6163) that PostgreSQL time-zone-based column values whose millisecond and microsecond parts were zero (0) were being serialized incorrectly: the emitted string omitted the millisecond and microsecond portions of the time instead of padding them with zeroes.

    This does not create any data loss!

What’s important to note is that prior to this release, when evaluating the values of such columns, consumers had to be prepared to parse these string-based time values without the presence of a millisecond or microsecond value. In effect, events followed an inconsistent pattern: some carried the millisecond and microsecond portions, while others did not if their source value had 0 milliseconds or 0 microseconds.

Starting with this release, these string-based time values will be emitted consistently, padded with zeroes (0) for the millisecond and microsecond parts, even when the source value has no millisecond or microsecond component.
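As an illustration (the timestamp value below is hypothetical, not taken from a real change event), a timestamptz value with zero fractional seconds changes from the first form to the second:

2023-03-08T10:15:30Z          (before: fractional part omitted when zero)
2023-03-08T10:15:30.000000Z   (after: consistently padded with zeroes)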

    Optional parallel snapshots

    Debezium’s relational database initial snapshot process has always been single-threaded. This limitation primarily stems from the complexities of ensuring data consistency across multiple transactions.

Starting in Debezium 2.2, we’re adding a new, initially optional way to utilize multiple threads to perform a consistent database snapshot for a connector. This implementation uses those threads to execute table-level snapshots in parallel.

In order to take advantage of this new feature, specify snapshot.max.threads in your connector’s configuration; when this property has a value greater than 1, parallel snapshots will be used.

    Example configuration using parallel snapshots
    snapshot.max.threads=4

In the example above, if the connector needs to snapshot more than 4 tables, at most 4 tables will be snapshot in parallel. When one thread finishes processing a table, it will take the next table to snapshot from the queue, and the process continues until all tables have been snapshot.
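For context, the property simply sits alongside the rest of the connector configuration. A minimal Kafka Connect registration sketch for a PostgreSQL connector with parallel snapshots enabled might look like the following; the connector choice, hostnames, credentials, and table names are illustrative placeholders, not taken from the release:

{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "database.hostname": "postgres",
    "database.port": "5432",
    "database.user": "postgres",
    "database.password": "postgres",
    "database.dbname": "inventory",
    "topic.prefix": "inventory",
    "table.include.list": "public.customers,public.orders,public.products,public.orders_history",
    "snapshot.max.threads": "4"
  }
}

With four threads configured, the four tables in the include list above would be snapshot concurrently.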

    This feature is considered incubating, but we strongly suggest that new connector deployments give this feature a try. We would welcome any and all feedback on how to improve this going forward.

    MongoDB server-side change stream filtering

Debezium presently subscribes to the MongoDB change stream and evaluates whether an event is of relevance or not on the connector side. On the surface, there is nothing technically wrong with this approach; it has worked well. However, a recent contributor explained how this decision impacts them.

Overall, the current process effectively serializes across the network all changes from MongoDB to the connector. If you have a lower volume of changes, you likely don’t see any issue with this approach; however, in a high-volume scenario, especially when you’re only interested in a subset of the data generated by change streams, you quickly begin to see how this approach is inefficient. Furthermore, if you’re running the connector in a cloud environment like AWS, a high-volume scenario can have a direct impact on utilization costs.

By moving the evaluation of the include/exclude list filters from the connector to the MongoDB server’s change stream subscription, this change brings a number of advantages for all MongoDB connector users.
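Conceptually, server-side filtering corresponds to adding a $match stage to the change stream’s aggregation pipeline so the server drops events for non-captured collections before they ever cross the network. The following Java sketch only illustrates that idea; it is not Debezium’s implementation, and the connection string, database, and collection names are made up:

import java.util.List;

import org.bson.conversions.Bson;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.model.Aggregates;
import com.mongodb.client.model.Filters;

public class ServerSideFilterSketch {
    public static void main(String[] args) {
        // Only stream events for the collections we actually capture.
        List<Bson> pipeline = List.of(
                Aggregates.match(Filters.in("ns.coll", List.of("customers", "orders"))));
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            client.getDatabase("inventory")
                  .watch(pipeline)                 // $match is evaluated on the server
                  .forEach(change -> System.out.println(change.getFullDocument()));
        }
    }
}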

Reducing the number of events seen by the connector lowers both network and CPU utilization. Events that the connector would simply discard due to include/exclude filters cause network usage that can be avoided, and when the connector is configured with full-document or pre-image settings, they add even more unnecessary network traffic. Likewise, receiving more events than the connector configuration is interested in means the connector does more processing, raising CPU utilization.

While network and CPU utilization are critical regardless of one’s environment, they are often more closely scrutinized when operating in cloud-based environments, as these two metrics directly impact the operating budget. Users should see overall lower network and CPU utilization with the Debezium MongoDB 2.2 connector.

We hope to share more details about the benefits of this change in a future blog post, so stay tuned!

    Incremental snapshot surrogate key support

Debezium’s incremental snapshot feature has been a tremendous success. It provides an efficient way to perform a consistent snapshot of data that can be resumed, which is critical when the snapshot consists of large volumes of data.

However, incremental snapshots do have specific requirements that must be met before the feature can be used. One of those requirements is that every table being snapshot must have a primary key. You may ask why a table would have no primary key; we aren’t going to debate that here today, but suffice it to say this occurs more often than you may think.

    With Debezium 2.2, incremental snapshots can be performed on key-less tables as long as there is one column that is unique and can be considered a "surrogate key" for incremental snapshot purposes.

The surrogate key feature is supported only by relational connectors; it is not supported by the MongoDB connector.

    To provide the surrogate key column data in an incremental snapshot signal, the signal’s payload must include the new surrogate key attribute, surrogate-key.

    An example incremental snapshot signal payload specifying a surrogate key
    {
       "data-collections": [ "public.mytab" ],
       "surrogate-key": "customer_ref"
}

In the above example, an incremental snapshot will be started for the table public.mytab, and it will use the customer_ref column as the primary key for generating the snapshot windows.

    A surrogate key cannot be defined using multiple columns, only a single column.
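For reference, when the signal is delivered through a signaling table, the payload above would typically be sent with an insert along these lines; the signal table name debezium_signal and the id value are illustrative, and the columns follow the usual id/type/data layout of Debezium signaling tables:

INSERT INTO debezium_signal (id, type, data)
VALUES (
  'ad-hoc-snapshot-1',
  'execute-snapshot',
  '{"type": "incremental", "data-collections": ["public.mytab"], "surrogate-key": "customer_ref"}'
);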

However, the surrogate key feature isn’t applicable only to tables with no primary key. There are several advantages to using this feature with tables that do have primary keys:

1. One clear advantage is when the table’s primary key consists of multiple columns. The snapshot query generates a disjunction predicate across the primary key columns, and its performance is highly dependent on the environment. Reducing the key to a single column often performs better across the board.

    2. Another advantage is when the surrogate key is based on a numeric data type while the primary key column is based on a character-based data type. Relational databases generally perform predicate evaluation more efficiently with numeric comparisons rather than character comparisons. By adjusting the query to use a numeric data type in this case, query performance could be better.

    Other fixes

    There were quite a number of other improvements, bug fixes, and stability changes in this release, some noteworthy are:

    • When using snapshot.collection.include.list, relational schema isn’t populated correctly DBZ-3594

    • Debezium UI should use fast-jar again with Quarkus 2.x DBZ-4621

    • Create a Datastax connector based on Cassandra connector DBZ-5951

    • Add support for honouring MongoDB read preference in change stream after promotion DBZ-5953

    • Add support for header to all Debezium Server sinks DBZ-6017

    • GCP Spanner connector start failing when there are multiple indexes on a single column DBZ-6101

    • Negative remaining attempts on MongoDB reconnect case DBZ-6113

    • Support String type for key in Mongo incremental snapshot DBZ-6116

    • Tables with spaces or non-ASCII characters in their name are not captured by Oracle because they must be quoted. DBZ-6120

    • Offsets are not advanced in a CDB deployment with low frequency of changes to PDB DBZ-6125

    • Allow TestContainers test framework to expose ConnectorConfiguration as JSON DBZ-6136

    • Oracle TIMESTAMP WITH TIME ZONE is emitted as GMT during snapshot rather than the specified TZ DBZ-6143

    • Upgrade impsort-maven-plugin from 1.7.0 to 1.8.0 DBZ-6144

    • Debezium UI E2E Frontend build failing randomly with corrupted Node 16 tar file DBZ-6146

    • Debezium UI SQL Server tests randomly fail due to slow agent start-up DBZ-6149

    • Upgrade Quarkus dependencies to 2.16.3.Final DBZ-6150

    • Remove hardcoded list of system database exclusions that are not required for change streaming DBZ-6152

    • RelationalSnapshotChangeEventSource swallows exception generated during snapshot DBZ-6179

    • Create SSL scenarios for integration tests for MySQL connector DBZ-6184

    Outlook & What’s Next?

We are nearing the end of the Debezium 2.2 development cycle. Assuming no unexpected problems, we intend to release Beta1 next week, followed by a release candidate two weeks thereafter. Our goal is to finalize the Debezium 2.2 release in late March or early April at the latest.

We would love to hear your feedback or suggestions about our roadmap, changes in this release, anything that is outstanding, or anything we haven’t mentioned. If you have any, be sure to get in touch with us on the mailing list or our chat.

Also, the DevNexus 2023 conference is coming up in early April in Atlanta, and I have the privilege of being a guest speaker discussing Debezium and CDC patterns. Be sure to check out that talk in person if you have the opportunity!

And finally, be on the lookout for the first installment of our 2023 Newsletter later this month. I will also be wrapping up the blog series, "Debezium for Oracle", where I cover performance, debugging, and frequently asked questions about the Oracle connector.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}


    \ No newline at end of file diff --git a/blog/2023/03/09/hello-debezium/index.html b/blog/2023/03/09/hello-debezium/index.html index 6462b95f05..37201e424d 100644 --- a/blog/2023/03/09/hello-debezium/index.html +++ b/blog/2023/03/09/hello-debezium/index.html @@ -1 +1 @@ - Hello Debezium Team!

    Hi everyone, my name is Mario Fiore Vitale and I recently joined Red Hat and the Debezium team.

I am a very curious person who follows a continuous learning approach; I like to keep growing my skills. I care about code quality and readability.

I have over nine years of experience and have worked for consultancy, startup, and enterprise product companies in different sectors. In my previous experience I had the chance to work on an architecture re-design project to split a monolith into a microservices application. Along the way I gained experience with different technologies such as Kafka, Elasticsearch, Redis, Kubernetes, VictoriaMetrics, Spring Framework, and a bit of Cassandra.

    Why Am I here?

    First of all, I have always been fascinated by OSS and the power of the "community". During my career I have used a lot of OSS and when I had the chance to give back to the community I didn’t back down.

In recent years data has become more and more important, both in terms of its role for companies and in terms of quantity. The way we manage this data is now crucial. Data comes from different sources, asynchronously, and must be shared with different consumers. So we need to continuously process incoming data, and this is where event stream processing comes in. Debezium can act as a facilitator for stream processing, enabling a lot of useful use cases for modern software architectures. This is why I like Debezium.

    I’m thrilled to be here and looking forward to working with this amazing community.

    Onwards,

    --Mario

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Hello Debezium Team!


    \ No newline at end of file diff --git a/blog/2023/04/03/debezium-2-2-beta1-released/index.html b/blog/2023/04/03/debezium-2-2-beta1-released/index.html index 71608c7d19..94edbef87c 100644 --- a/blog/2023/04/03/debezium-2-2-beta1-released/index.html +++ b/blog/2023/04/03/debezium-2-2-beta1-released/index.html @@ -21,4 +21,4 @@ debezium.sink.rabbitmq.routingKey=<routing-key> # The default is 30 seconds, specified in milliseconds -debezium.sink.rabbitmq.ackTimeout=30000

The debezium.sink.rabbitmq.connection.* properties are required, while the latter two properties, routingKey and ackTimeout, are optional and have preset defaults that should be sufficient for most use cases.
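To put the sink options into context, a minimal Debezium Server application.properties might look roughly like the sketch below. This is an illustration rather than a canonical configuration: the connection.* property names assume the RabbitMQ client’s standard ConnectionFactory settings, and the source connector, credentials, and topic names are placeholders.

debezium.sink.type=rabbitmq
debezium.sink.rabbitmq.connection.host=rabbitmq
debezium.sink.rabbitmq.connection.port=5672
debezium.sink.rabbitmq.connection.username=guest
debezium.sink.rabbitmq.connection.password=guest
debezium.sink.rabbitmq.routingKey=debezium
debezium.sink.rabbitmq.ackTimeout=30000

debezium.source.connector.class=io.debezium.connector.postgresql.PostgresConnector
debezium.source.offset.storage.file.filename=data/offsets.dat
debezium.source.database.hostname=postgres
debezium.source.database.port=5432
debezium.source.database.user=postgres
debezium.source.database.password=postgres
debezium.source.database.dbname=inventory
debezium.source.topic.prefix=tutorial
debezium.source.table.include.list=public.customers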

    Other fixes

    There were quite a number of other improvements, bug fixes, and stability changes in this release, some noteworthy are:

    • Create an endpoint to update a connector DBZ-5314

    • Refactor snapshotting to use change streams instead of oplog DBZ-5987

    • Update the design for Debezium based connectors Filter step DBZ-6060

    • NPE when setting schema.history.internal.store.only.captured.tables.ddl=true DBZ-6072

    • Postgres connector stuck when replication slot does not have confirmed_flush_lsn DBZ-6092

    • java.lang.NullPointerException in MySQL connector with max.queue.size.in.bytes DBZ-6104

    • debezium-connector-mysql failed to parse serveral DDLs of 'CREATE TABLE' DBZ-6124

    • Connect and stream from sharded clusters through mongos instances DBZ-6170

    • Support Azure blob storage as Debezium history storage DBZ-6180

    • Zerofill property failed for different int types DBZ-6185

    • GRANT DELETE HISTORY couldn’t be parsed in mariadb DBZ-6186

    • ddl parse failed for key partition table DBZ-6188

    • Config options internal.schema.history.internal.ddl.filter not working DBZ-6190

    • Support Database role in Connector Config. DBZ-6192

    • Use CHARSET for alterByConvertCharset clause DBZ-6194

    • Remove duplicated createDdlFilter method from historized connector config DBZ-6197

    • Create new SMT to copy/move header to record value DBZ-6201

    • Data loss upon connector restart DBZ-6204

    • ParsingException: DDL statement couldn’t be parsed DBZ-6217

    • The CHARACTER/CHARACTER(p)/CHARACTER VARYING(p) data types not recognized as JDBC type CHAR DBZ-6221

    • MySQL treats the BOOLEAN synonym differently when processed in snapshot vs streaming phases. DBZ-6225

    • MySQL treats REAL synonym differently when processed in snapshot vs streaming phases. DBZ-6226

    • Spanner Connector - Deadlock in BufferedPublisher when publish gives exception DBZ-6227

    • Publish of sync event fails when message becomes very large. DBZ-6228

    • MySQL treats NCHAR/NVARCHAR differently when processed in snapshot vs streaming phases. DBZ-6231

    • Add support for columns of type "bytea[]" - array of bytea (byte array) DBZ-6232

    • MySQL singleDeleteStatement parser does not support table alias DBZ-6243

    • Support ImageFromDockerfile with Debezium’s testcontainers suite DBZ-6244

    • Testcontainers MongoDbReplicaSetTest failing with MongoDB 4.2 DBZ-6247

    • Expose EmbeddedEngine configurations DBZ-6248

    • Wrong error thrown when snapshot.custom_class=custom and no snapshot.custom.class DBZ-6249

    • Missing GEOMETRY keyword which can be used as column name DBZ-6250

    • Postgres connector stuck trying to fallback to restart_lsn when replication slot confirmed_flush_lsn is null. DBZ-6251

    • MariaDB’s UUID column type cannot be parsed when scheme is loaded DBZ-6255

    Altogether, 52 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Đỗ Ngọc Sơn, Anatolii Popov, Anisha Mohanty, Bob Roldan, Chris Cranford, Gunnar Morling, Harvey Yue, Hossein Torabi, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Nir Levy, Plugaru Tudor, Robert Roldan, Russell Mora, Vojtech Juranek, Vojtěch Juránek, and tony joseph!

    Outlook & What’s Next?

    As we approach the end of the Debezium 2.2 development cycle, with a final release expected in the next two weeks, we’re going to begin to turn our attention toward Debezium 2.3. The Debezium 2.3 release will be a much more condensed and focused release, as our goal is to release it in late June.

    We will be refining our roadmap in the coming days, so I would pay close attention to this to get an understanding of what lies ahead in the near future for Debezium 2.3. We would like to hear your feedback or suggestions, so if you have anything you’d like to share be sure to get in touch with us on the mailing list or our chat.

DevNexus 2023 is also underway this week, from April 4th until April 6th, and I will be presenting a talk on CDC Patterns with Distributed Systems using Debezium. If you’re in the Atlanta area and plan to attend DevNexus on Thursday, April 6th, drop me a line.

    Until next time, let the changes continue to stream…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +debezium.sink.rabbitmq.ackTimeout=30000


    \ No newline at end of file diff --git a/blog/2023/04/17/debezium-2-2-cr1-released/index.html b/blog/2023/04/17/debezium-2-2-cr1-released/index.html index 693c23b55a..9b0ad13f48 100644 --- a/blog/2023/04/17/debezium-2-2-cr1-released/index.html +++ b/blog/2023/04/17/debezium-2-2-cr1-released/index.html @@ -1 +1 @@ - Debezium 2.2.0.CR1 Released

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these changes and what they mean moving forward!

    Upgrade to Quarkus 3

Quarkus is a Kubernetes-native Java stack that combines the best Java libraries to create fast, low-footprint applications. The Debezium Server runtime is based on Quarkus, as is part of the Debezium UI. Additionally, the Debezium Outbox extension is also based on the Quarkus platform.

The upgrade to Quarkus 3 introduces a number of improvements, including the latest stable releases of a plethora of Java libraries and the migration from Java EE to Jakarta EE. If you are not familiar with this migration: previously, most Java EE platform classes were bundled in the javax.* packages. Over the past year or two, more applications have started the move from Java EE or J2EE to Jakarta EE, and Quarkus 3 marks this transition. Overall, the only real change is that classes that previously resided in javax.* are now placed in jakarta.*.
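As a concrete illustration (the entity below is made up and not part of Debezium), code that compiled against Java EE only needs its import packages updated when moving to Quarkus 3:

// After upgrading to Quarkus 3 / Jakarta EE only the import packages change;
// the annotations and the rest of the code remain identical.
import jakarta.persistence.Entity;   // was: javax.persistence.Entity
import jakarta.persistence.Id;       // was: javax.persistence.Id

@Entity
public class OrderEvent {
    @Id
    private Long id;
}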

    If your application makes use of the Debezium Quarkus Outbox extension, be aware that in order to use Debezium 2.2 with Quarkus, you will need to migrate to Quarkus 3. This also means that if you want to take advantage of the Outbox extension for Reactive data sources, you will be required to use Quarkus 3 as well.

Finally, if you are developing or maintaining sink adapters for Debezium Server, you will also need to make adjustments to use the new Jakarta EE annotations rather than the older Java EE annotations.

    Debezium Server Pulsar Changes

In prior versions of the Debezium Server Pulsar sink, the adapter leveraged the send() method to deliver messages in a synchronous way. While this works for sending one-off messages, it has the potential to introduce connector latency, as the method waits for an acknowledgement of each send operation sequentially. Since the Debezium Server sink adapters are handed a collection of events to deliver, this synchronous behavior simply does not perform well.

Starting with Debezium 2.2, the Pulsar sink uses sendAsync() to asynchronously deliver the batch of events to Pulsar, netting a substantial increase in overall throughput. While each event within the batch is delivered asynchronously, the adapter only proceeds to the next batch once the current batch has been acknowledged in its entirety.
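The pattern looks roughly like the following sketch (illustrative only, not the actual sink code): every record of a batch is handed to the Pulsar producer asynchronously, and the sink blocks once per batch until all sends have been acknowledged.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;

class AsyncBatchSender {
    // Send every record asynchronously, then wait once for the whole batch.
    void sendBatch(Producer<byte[]> producer, List<byte[]> batch) {
        List<CompletableFuture<MessageId>> pending = new ArrayList<>();
        for (byte[] record : batch) {
            pending.add(producer.sendAsync(record));   // returns immediately
        }
        // Only proceed to the next batch after all sends are acknowledged.
        CompletableFuture.allOf(pending.toArray(new CompletableFuture[0])).join();
    }
}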

    Jolokia support

Jolokia is a JMX-HTTP bridge that provides an alternative to using JSR-160 to gather metrics. It is an agent-based approach that improves on traditional JMX by introducing unique features like bulk requests and fine-grained security policies.

    With Debezium 2.2, the debezium/connect image now ships with Jolokia, but this agent isn’t enabled by default. In order to enable Jolokia support, the container must be started with ENABLE_JOLOKIA set to true. By default, Jolokia will bind to port 8778 when enabled.

In the event that a different port is required, Jolokia will need to be enabled differently. For example, in order to enable Jolokia on port 9779, do not set ENABLE_JOLOKIA, but instead configure the KAFKA_OPTS environment variable as follows:

    -e KAFKA_OPTS="-javaagent:$(ls "$KAFKA_HOME"/libs/jolokia-jvm-*.jar)=port=9779,host=*"

    By specifying the above environment variable, Jolokia’s JMX-HTTP bridge will be available on port 9779 of the container.

    Do not forget to add the Jolokia port to the container’s list of exposed ports when starting.
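For the default case, starting the container could look like the sketch below; the Kafka bootstrap and topic settings are the usual debezium/connect environment variables, and all values are placeholders:

docker run -d --name connect \
  -p 8083:8083 -p 8778:8778 \
  -e ENABLE_JOLOKIA=true \
  -e BOOTSTRAP_SERVERS=kafka:9092 \
  -e GROUP_ID=1 \
  -e CONFIG_STORAGE_TOPIC=connect_configs \
  -e OFFSET_STORAGE_TOPIC=connect_offsets \
  -e STATUS_STORAGE_TOPIC=connect_statuses \
  quay.io/debezium/connect:2.2

# Jolokia's HTTP endpoint then answers on the mapped port:
curl http://localhost:8778/jolokia/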

    Deprecation of Docker Hub

Docker recently announced a reduction in their free organization account offerings, which are leveraged by a number of open-source communities, including Debezium. Unfortunately, Debezium does not qualify for the free offering according to their rules.

Although Docker chose to walk back their decision, the Debezium team believes that we need a solid path forward to guarantee the availability of our images to our community without disruption. Debezium has been dual-publishing to both docker.io and quay.io for quite some time, and we will continue this for Debezium 2.2.0.Final and for all Debezium 2.3 preview releases.

We plan to publish a blog post going into more detail in the coming days. In the meantime, all users should plan to migrate to quay.io as soon as possible to avoid any disruption when fetching newer versions of Debezium.
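In practice this simply means referencing the quay.io registry when pulling Debezium images; the tag below is illustrative:

docker pull quay.io/debezium/connect:2.2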

    Other fixes

    There were quite a number of other improvements, bug fixes, and stability changes in this release, some noteworthy are:

    • Upgrade dependencies (Quarkus, etc) of Debezium UI DBZ-4109

    • Failed retriable operations are retried infinitely DBZ-4488

    • UI- Add the UI to configure the additional properties for a connector DBZ-5365

    • Capture events in order across mongodb shards DBZ-5590

    • DDL events not stored in schema history topic for excluded tables DBZ-6070

    • Oracle path used current batchSize to calculate end scn is wrong, need to use min batch size DBZ-6155

    • Upgrade UI build to use Debezium 2.2 or latest DBZ-6173

    • Oracle-Connector dbz##user needs more rights DBZ-6198

    • Make quay.io primary image repository DBZ-6216

    • Multiplatform build of example-postres fails DBZ-6258

    • Add protoc version property to postgres connector pom.xml DBZ-6261

    • Pass through configurations for kafka topics/configuration DBZ-6262

    • Postgres connector doesn’t need logical WAL level when snapshotting only DBZ-6265

    • Update config properties in RHEL deployment instructions DBZ-6266

    • MySQL connector doesn’t need to query binlog when snapshotting only DBZ-6271

    • Table names with spaces are not correctly deserialized when using an Infinispan cache as the transaction buffer DBZ-6273

    • Infinispan cache configuration used by Oracle tests are not compatible with Infinispan 14.0.2 DBZ-6274

    • Transaction buffer state can become corrupted when using Infinispan cache with LOBs DBZ-6275

    • Enable the docker tag to be configurable in the Spanner connector DBZ-6302

    • Upgrade MySQL JDBC driver to 8.0.32 DBZ-6304

    • Allow specifying docker image reference in MongoDB testcontainers implementation DBZ-6305

    • Use MongoDbContainer instead of MongoDBContainer test containers class in ConnectorConfiguration class DBZ-6306

    • DDL statement couldn’t be parsed - Oracle connector 2.1.3.Final DBZ-6314

    • Unparsable DDL statements (MySQL/MariaDB) DBZ-6316

    • Remove outdated information about SYS user accounts with Oracle DBZ-6318

    • Cassandra 3 cannot be built using JDK20 DBZ-6320

    • Bundle Jolokia with Debezium connect image DBZ-6323

    Altogether, 34 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Anisha Mohanty, Bob Roldan, Chris Cranford, Harvey Yue, Jacob Gminder, Jiri Pechanec, Jochen Schalanda, Mario Fiore Vitale, Mark Bereznitsky, Ondrej Babec, Pengwei Dou, Robert Roldan, and Vojtech Juranek!

    Outlook & What’s Next?

We are approaching the end of the Debezium 2.2 release cycle, with a final release expected this week. If there are any outstanding bugs or issues, please get in touch with us so that they can be addressed prior to the final release.

The Debezium team is now shifting its focus to Debezium 2.3. The Debezium 2.3 release will be a much more condensed and focused release, as our goal is to release it in late June. The Debezium roadmap has been updated and the following features are planned for this quarter:

    • Support configurable signaling channels

    • Support exactly once delivery semantics (phase 1)

    • Kubernetes operator for Debezium Server

    • Oracle OpenLogReplicator adapter proof-of-concept / incubating implementation

    • Debezium UI improvements

    We would like to hear your feedback or suggestions, so if you have anything you’d like to share be sure to get in touch with us on the mailing list or our chat.

    Until next time, let the changes continue to stream…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

schema.history.internal.rocketmq.secret.key=<rocketmq-secret-key>
schema.history.internal.rocketmq.recovery.attempts=5
schema.history.internal.rocketmq.recovery.poll.interval.ms=1000
schema.history.internal.rocketmq.store.record.timeout.ms=2000
    schema.history.internal.rocketmq.topic

    Specifies the topic name where the schema history will be stored.

    schema.history.internal.rocketmq.name.srv.addr

Specifies the nameserver address that Rocket MQ uses for service discovery.

    schema.history.internal.rocketmq.acl.enabled

    Specifies whether access control lists (ACLs) are enabled, defaults to false.

    schema.history.internal.rocketmq.access.key

    Specifies the Rocket MQ access key, required only if ACLs are enabled.

    schema.history.internal.rocketmq.secret.key

    Specifies the Rocket MQ secret key, required only if ACLs are enabled.

    schema.history.internal.rocketmq.recovery.attempts

Specifies the number of consecutive poll attempts that return no data before history recovery is considered complete.

    schema.history.internal.rocketmq.recovery.poll.interval.ms

Specifies the number of milliseconds to wait for each poll attempt while recovering the history.

    schema.history.internal.rocketmq.store.record.timeout.ms

    Specifies the number of milliseconds for a write to Rocket MQ to complete before timing out.
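Putting the snippet and the properties above together, a complete schema history section of the configuration might look like the following sketch; the topic name and nameserver address are illustrative values rather than defaults, and the remaining values mirror those shown earlier:

# illustrative Rocket MQ schema history settings; adjust the topic and nameserver for your environment
schema.history.internal.rocketmq.topic=schema-changes.inventory
schema.history.internal.rocketmq.name.srv.addr=rocketmq-nameserver:9876
schema.history.internal.rocketmq.acl.enabled=true
schema.history.internal.rocketmq.access.key=<rocketmq-access-key>
schema.history.internal.rocketmq.secret.key=<rocketmq-secret-key>
schema.history.internal.rocketmq.recovery.attempts=5
schema.history.internal.rocketmq.recovery.poll.interval.ms=1000
schema.history.internal.rocketmq.store.record.timeout.ms=2000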

    Other fixes & improvements

    What’s Next?

We began pre-planning Debezium 2.3 several weeks ago, and with 2.2 shipped, our focus will now be on the next minor release. With the Debezium 2.2 release cycle being a tad longer than normal, the release cycle for 2.3 will be condensed, as we want to return to our end-of-quarter release cadence. In order to achieve that goal, we've chosen to focus on the following features for the next minor release:

    Configurable Signal Channels

    The goal of this change is to provide a way in which signals can be sent to a connector from a variety of sources, including things like the filesystem, Kafka topic, database table, etc.

    Exactly once delivery semantics

Debezium currently guarantees only at-least-once delivery semantics, meaning that a change event could be written to a topic more than once in the case of unsafe shutdowns or connector failures. Kafka, and by extension Kafka Connect, now supports exactly-once delivery, and we want to explore this feature as part of Debezium. The goal is to first add this to at least one connector as a proof of concept and, based on feedback, extend it to all connectors.
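For context, exactly-once support for source connectors landed on the Kafka Connect side with KIP-618 in Apache Kafka 3.3 and is switched on per distributed worker; a minimal sketch of the worker-level setting involved looks like this, with Debezium's own support still being the exploratory work described above:

# connect-distributed.properties -- requires a Kafka Connect 3.3+ distributed cluster
exactly.once.source.support=enabled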

    Kubernetes operator for Debezium Server

    Debezium Server has gained quite a bit of exposure in recent months, both with new sink adapters and just general usage by the community. We want to bring the power of Kubernetes to Debezium Server, introducing an operator that you can deploy in order to manage the full lifecycle of a Debezium Server deployment.

    Ingestion from Oracle using OpenLogReplicator

The Debezium Oracle connector currently supports ingestion of changes using XStream or LogMiner. We want to build a proof of concept using OpenLogReplicator, a native application that is capable of reading the Oracle redo and archive logs directly from the file system. We do not intend to replace either of the existing adapters with this new approach, but rather to extend the connector's functionality and offer alternative ingestion options that may have less overhead.

    Debezium UI Enhancements

    We believe there is a lot of unlocked potential with Debezium UI, so this release will focus on improving that overall user experience by adding new features like starting/stopping ad-hoc snapshots, editing connector deployments, and displaying critical connector metrics.

    While the team intends to focus on the above improvements, we would really like your feedback or suggestions. If you have anything that you’d like to share, be sure to get in touch with us on the mailing list or our chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

New Debezium images will be available only on Quay.io in the future

As you may have noticed, Docker recently announced a reduction of its free organization account offering. Docker intended to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects, and the Debezium project doesn't meet their definition of an open source project because we have a pathway to commercialization. As the affected accounts were to be terminated within 30 days, we immediately started working on moving the Debezium project off Docker Hub.

Based on community feedback, Docker later re-evaluated its decision and the Free Team plan remains available as before. However, the whole story, and especially the initial intention to give projects that don't meet the DSOS conditions only 30 days to migrate, undermined our trust in Docker and raised the question of what may come in the future. As a result, we decided to stop publishing Debezium images to Docker Hub.

For quite some time we have already been publishing all Debezium images to two container image registries, Docker Hub and Quay.io.

The upcoming 2.2 release and the previews of 2.3 (including CR releases) will still be available on Docker Hub, but starting with the 2.3.0.Final release, we will stop publishing images there. Images for Debezium 2.3.0.Final and subsequent releases will be available only on Quay.io. Older, already published images will of course still be available through Docker Hub (unless Docker changes its conditions in a way that prevents it in the future). Older Debezium images can also be found on Quay.io.

Quay.io is a mature container registry service that provides additional features such as vulnerability scanning. As Quay.io is run and sponsored by Red Hat, and we already publish the images there, it was a natural choice for us to move to this registry.

How do you migrate to Quay.io? It's very simple: just add the quay.io/ prefix to the container image name. For example, instead of running

    docker pull debezium/connect:latest

    you run

    docker pull quay.io/debezium/connect:latest

and similarly for any other images or commands.
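For instance, assuming you also run the debezium/zookeeper and debezium/kafka images from the Debezium tutorial, the same prefix change applies:

docker pull quay.io/debezium/zookeeper:latest
docker pull quay.io/debezium/kafka:latest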

If you have any questions or issues with using the Quay.io images, don't hesitate to reach out to us and raise your questions or concerns in our user chat room.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.


    In either case, the result should look like this:

    TensorFlow digit recognition from streaming

    Conclusions

In this demo, we have shown how to load existing data from the database, transform it on the fly, ingest it into the TensorFlow model via Kafka, and use it for model training. Later on, we ingested newly created data into this pre-trained model using CDC and data streaming and obtained meaningful results. Debezium can provide a valuable service not only for use cases like the one described in this post, but can also play a key role in ingesting data into online machine learning pipelines.

While the whole pipeline is relatively easy to implement, some areas can be refined to improve the user experience and/or make the entire pipeline smoother. As our (the Debezium developers') background is not primarily in machine learning and data science, we would appreciate any input from the community on how Debezium can aid machine learning pipelines (or how it is already used, if there are any such cases) and where there is room for improvement. We would also appreciate any new ideas on how Debezium, or change data capture in general, can be helpful in this area. These ideas further reveal Debezium's potential to ingest data into machine learning pipelines and contribute to a better user experience in the whole process. If you have any input in this regard, don't hesitate to reach out to us on the Zulip chat or mailing list, or transform your ideas directly into Jira feature requests.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.


    Other fixes

There were quite a number of bug fixes and stability changes in this release; some noteworthy ones are:

    • Toasted varying character array and date array are not correcly processed DBZ-6122

    • Introduce LogMiner query filtering modes DBZ-6254

    • Lock contention on LOG_MINING_FLUSH table when multiple connectors deployed DBZ-6256

    • Ensure that the connector can start from a stale timestamp more than one hour into the past DBZ-6307

    • The rs_id field is null in Oracle change event source information block DBZ-6329

    • Add JWT authentication to HTTP Client DBZ-6348

    • Using pg_replication_slot_advance which is not supported by PostgreSQL10. DBZ-6353

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    • Support multiple tasks when streaming shard list DBZ-6365

    • Kinesis Sink - AWS Credentials Provider DBZ-6372

    • Toasted hstore are not correcly processed DBZ-6379

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • __source_ts_ms r (read) operation date is set to future for SQL Server DBZ-6388

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • NPE on read-only MySQL connector start up DBZ-6440

    What’s next?

With Debezium 2.3 underway, I expect a rather quick cycle of alpha, beta, and final releases over the next six weeks. We still have a lot that we hope to get into this release in that time, so stay tuned. As we get closer to the end of June, we'll begin our planning for Debezium 2.4!

    Also, Red Hat Summit 2023 is next week in Boston. There will be a break-out session where Hugo and Chris will be discussing the new Debezium JDBC sink connector. If you’re able to attend, we’d love to have an opportunity to chat with you before or after the session.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.3.0.Beta1 Released

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

While this release focuses primarily on bug fixes and stability improvements, there are some new improvements to the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility-breaking changes.

This release contains changes for 22 issues, so let's take a moment and dive into the new features and any noteworthy bug fixes or breaking changes!

    Breaking Changes

    Debezium recently introduced the JDBC storage module that allows you to store offsets and schema history data inside a relational database. The JDBC storage module used UTF-16 as its default encoding; however, most databases use UTF-8. This release of Debezium aligns the JDBC storage module’s encoding to use UTF-8 moving forward.

    PostgreSQL Replica Identity Changes

    Debezium 2.3 introduces a new PostgreSQL connector feature called "Autoset Replica Identity".

    Replica identity is PostgreSQL’s way to identify what columns are captured in the database transaction logs for inserts, updates, and deletes. This new feature allows configuring a table’s replica identity via connector configuration and delegating the responsibility of setting this configuration to the connector at start-up.

The new configuration option, replica.identity.autoset.values, specifies a comma-separated list of table and replica identity tuples. If a listed table already has a replica identity, it will be overwritten to match what is specified in this configuration. PostgreSQL supports several replica identity types; more information on these can be found in the documentation.

When specifying replica.identity.autoset.values, the value is a comma-separated list where each element uses the format <fully-qualified-table-name>:<replica-identity>. An example is shown below where two tables are configured to have full replica identity:

    {
       "replica.identity.autoset.values": "public.table1:FULL,public.table2:FULL"
}

Be mindful that if the user account used by the connector does not have the appropriate database permissions to set a table's replica identity, the use of this feature will result in a failure. In the event of a failure due to permissions, you must make sure the proper replica identity is set manually using a database account with the right permissions, for example with a statement like the one shown below.
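For reference, a minimal sketch of setting the replica identity manually for one of the tables from the example above looks like this:

-- run as the table owner or a superuser
ALTER TABLE public.table1 REPLICA IDENTITY FULL;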

    Correlate Incremental Snapshot notification ids

    Debezium 2.3 introduces a new notification and channels subsystem. This subsystem allows you to send a signal via a variety of channels that include the filesystem, Kafka topic, and database table out of the box; however, the feature is extendable. In addition, this subsystem also includes the ability to send notifications about the status of the initial snapshots and incremental snapshots if they’re used. These notifications can help facilitate an easier line of communication between Debezium and other third-party systems that may need to know when an incremental or traditional snapshot has finished and whether it finished successfully or not.

    In this release, the notification and channels subsystem has been improved to correlate the signal to the notification. So when you send a signal and it is consumed by Debezium, any notification that is raised will contain a reference to the signal, allowing any third-party or external process to know precisely which signal the notification references.

This should help close the gap in distributed communication across applications or processes that rely on the new notification and channels subsystem.

    Other fixes

There were quite a number of bug fixes and stability changes in this release; some noteworthy ones are:

    • Debezium Server stops sending events to Google Cloud Pub/Sub DBZ-5175

    • Snapshot step 5 - Reading structure of captured tables time too long DBZ-6439

    • Oracle parallel snapshots do not properly set PDB context when using multitenancy DBZ-6457

    • [MariaDB] Add support for userstat plugin keywords DBZ-6459

    • Debezium Server cannot recover from Google Pub/Sub errors DBZ-6461

    • Db2 connector can fail with NPE on notification sending DBZ-6485

    • BigDecimal fails when queue memory size limit is in place DBZ-6490

    • ORACLE table can not be captrued, got runtime.NoViableAltException DBZ-6492

    • Signal poll interval has incorrect default value DBZ-6496

    • Oracle JDBC driver 23.x throws ORA-18716 - not in any time zone DBZ-6502

    • Alpine postgres images should use llvm/clang 15 explicitly DBZ-6506

    • ExtractNewRecordState SMT in combination with HeaderToValue SMT results in Unexpected field name exception DBZ-6486

    Altogether, 22 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Angshuman Dey, Anisha Mohanty, Chris Cranford, Harvey Yue, Ismail Simsek, Jakub Cechacek, Jiri Pechanec, Jochen Schalanda, Kanthi Subramanian, Mario Fiore Vitale, Martin Medek, and Vojtech Juranek!

    What’s next?

With Debezium 2.3 being released under a condensed schedule, you can expect the CR1 release within the next 1-2 weeks. The plan is to release Debezium 2.3.0.Final in the middle of June and for the team to then begin preparation for Debezium 2.4.

    As we begin to prepare to move toward Debezium 2.4, we would love to hear your feedback or suggestions. The roadmap will be updated in the coming week, so please be sure to get in touch with us on the mailing list or our chat if you have any ideas or suggestions.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.3.0.CR1 Released

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize Debezium 2.3 in preparation for a final release in the coming weeks, which typically means we're focusing on bug fixes; however, this release also includes two new features. Let's take a moment and dive into these new features and any noteworthy bug fixes!

    Breaking Changes

    This release includes no breaking changes, so upgrading from Debezium 2.3.0.Beta1 to 2.3.0.CR1 should be a simple drop-in replacement.

    If you are upgrading from a prior version, please review the release notes for any migration steps that may be necessary.

    Debezium Server Kubernetes Operator

This release introduces a preview version of the new Debezium Operator, providing the ability to deploy and manage Debezium Server instances within Kubernetes. Debezium Server allows you to stream change events from your data sources to a wide variety of messaging infrastructures. Our goal is to provide a Kafka-less alternative for members of the Debezium community who wish to utilize Kubernetes for scalability and high-availability deployments.

Presently, the documentation is sparse as the operator is in its early incubation stages; however, we intend to improve upon this throughout the remainder of the 2.3 release cycle as well as into Debezium 2.4. You can find a deployment example and a basic description of the custom resource specification in the GitHub repository, which you can use as a reference in the short term.

We do not recommend a production deployment of this component at this time; however, we encourage users to provide community feedback. The feedback will be valuable in evaluating whether the component is feature-ready or whether there are still areas that need improvement to meet everyone's needs.

    JMX signals and notifications

Debezium 2.3 previously introduced both a new signal channel and a notification feature. This allows external applications to easily integrate with Debezium, sending signals to perform various tasks such as ad-hoc incremental snapshots, and receiving notifications about the progress of such tasks. This release builds on top of that functionality by adding the ability to send signals and receive notifications via JMX.

    Sending signals

In this release, the signal channel subsystem has been improved to support sending signals via JMX. From the jconsole window, you can now see two new subsections for a connector: a notifications section and a signals section, as shown below:

    JConsole JMX metrics

The new signals section allows you to invoke an operation on the JMX bean in order to transmit a signal to Debezium. The signal resembles the logical signal table structure in that it accepts three parameters: a unique identifier, the signal type, and the signal payload. The following illustrates what this looks like from jconsole:

    JConsole Sending Signals

    Receiving notifications

    The new notifications section allows you to receive and react to notifications captured by the JMX bean from Debezium. The Debezium JMX bean will buffer all notifications to ensure that no notification is missed. The following illustrates what this looks like from jconsole:

    JConsole Receive Notifications

    The JMX bean does not automatically clear the notification queue. In order to avoid memory concerns, be sure to invoke the reset method on the notifications bean once you’ve processed all notifications.

    We look forward to your feedback on this new way to integrate signals and notifications with Debezium over JMX.

    Other fixes

    • Code Improvements for skip.messages.without.change DBZ-6366

    • FileSignalChannel is not loaded DBZ-6509

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • Only use error processing mode on certain errors DBZ-6523

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • Use better hashing function for PartitionRouting DBZ-6529

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    Altogether, 24 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Anisha Mohanty, Jakub Cechacek, Jesse Ehrenzweig, Jiri Pechanec, Mario Fiore Vitale, Ronak Jain, Thomas Thornton, Tommy Karlsson, and Vojtech Juranek!

    What’s next?

    With Debezium 2.3 quickly approaching a Final release, Debezium 2.4 is just around the corner.

    Debezium 2.4 planning is underway, and we would love to have your feedback. Please reach out to us on the mailing list or our chat if you have any ideas or suggestions. The roadmap for Debezium 2.4 and beyond will be updated in conjunction with Debezium 2.3 final in the coming week.

    And until next time, stay cool as summer approaches!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.3.0.CR1 Released

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release also includes two new features. Let’s take a moment to dive into these new features and any noteworthy bug fixes!

    Breaking Changes

    This release includes no breaking changes, so upgrading from Debezium 2.3.0.Beta1 to 2.3.0.CR1 should be a simple drop-in replacement.

    If you are upgrading from a prior version, please review the release notes for any migration steps that may be necessary.

    Debezium Server Kubernetes Operator

    This release introduces a preview version of the new Debezium Operator, providing the ability to deploy and manage Debezium Server instances within Kubernetes. Debezium Server allows you to stream change events from your data sources to a wide variety of messaging infrastructures. Our goal is to provide a Kafka-less alternative for the Debezium community who wish to utilize Kubernetes for scalability and high availability deployments.

Presently, the documentation is sparse as the operator is in its early incubation stages; however, we intend to improve upon this throughout the remainder of the 2.3 release cycle as well as into the Debezium 2.4 release cycle. You can find a deployment example and a basic description of the custom resource specification in the GitHub repository, which you can use as a reference in the short term.

    We do not recommend a production deployment of this component at this time; however, we encourage users to provide community feedback. The feedback will be valuable in evaluating if the component is feature ready or if there are still areas of improvement to meet everyone’s needs.

    JMX signals and notifications

Debezium 2.3 previously introduced both a new signal channel and notification feature. This feature allows external applications to easily integrate with Debezium, sending signals to perform various tasks such as ad-hoc incremental snapshots, and to receive notifications about the progress of such tasks. This release builds on top of that functionality by adding the ability to send signals and receive notifications via JMX.

    Sending signals

In this release, the signal channel subsystem has been improved to support sending signals via JMX. From the jconsole window, you can now see two new subsections for a connector: a notifications section and a signals section, as shown below:

    JConsole JMX metrics

The new signals section allows you to invoke an operation on the JMX bean in order to transmit a signal to Debezium. The signal resembles the logical signal table structure in that it accepts three parameters: a unique identifier, the signal type, and the signal payload. The following illustrates what this looks like from jconsole:

    JConsole Sending Signals
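Beyond jconsole, any JMX client can invoke the same operation programmatically. The following is a minimal, hypothetical Java sketch; the JMX service URL and the MBean ObjectName (which includes the connector type and topic prefix) are assumptions for illustration, so copy the exact bean name that jconsole displays for your connector:

// A minimal sketch of sending a signal over JMX from a plain Java client.
// The service URL and ObjectName below are assumptions for illustration only.
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxSignalSender {
    public static void main(String[] args) throws Exception {
        // Hypothetical JMX endpoint of the Kafka Connect worker running the connector
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9012/jmxrmi");
        try (JMXConnector jmx = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection server = jmx.getMBeanServerConnection();
            // Assumed bean name; take the real one from the signals section in jconsole
            ObjectName signalBean = new ObjectName(
                    "debezium.postgres:type=management,context=signals,server=inventory");
            server.invoke(signalBean, "signal",
                    new Object[] {
                            "ad-hoc-1",                // unique identifier
                            "execute-snapshot",        // signal type
                            "{\"data-collections\": [\"public.orders\"], \"type\": \"incremental\"}" // payload
                    },
                    new String[] { String.class.getName(), String.class.getName(), String.class.getName() });
        }
    }
}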

    Receiving notifications

    The new notifications section allows you to receive and react to notifications captured by the JMX bean from Debezium. The Debezium JMX bean will buffer all notifications to ensure that no notification is missed. The following illustrates what this looks like from jconsole:

    JConsole Receive Notifications

    The JMX bean does not automatically clear the notification queue. In order to avoid memory concerns, be sure to invoke the reset method on the notifications bean once you’ve processed all notifications.
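For completeness, the following hypothetical Java sketch reads the buffered notifications and then clears them via the reset operation; the ObjectName and attribute name are assumptions, so use the names shown under the notifications section in jconsole for your connector:

// A minimal sketch of draining buffered notifications over JMX and clearing the buffer.
// The ObjectName and attribute name are assumptions for illustration only.
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxNotificationReader {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9012/jmxrmi");
        try (JMXConnector jmx = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection server = jmx.getMBeanServerConnection();
            ObjectName notificationBean = new ObjectName(
                    "debezium.postgres:type=management,context=notifications,server=inventory");
            // Read the buffered notifications (attribute name assumed)
            Object notifications = server.getAttribute(notificationBean, "Notifications");
            System.out.println("Buffered notifications: " + notifications);
            // Clear the buffer once processed to avoid unbounded memory growth
            server.invoke(notificationBean, "reset", new Object[0], new String[0]);
        }
    }
}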

    We look forward to your feedback on this new way to integrate signals and notifications with Debezium over JMX.

    Other fixes

    • Code Improvements for skip.messages.without.change DBZ-6366

    • FileSignalChannel is not loaded DBZ-6509

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • Only use error processing mode on certain errors DBZ-6523

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • Use better hashing function for PartitionRouting DBZ-6529

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    Altogether, 24 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Anisha Mohanty, Jakub Cechacek, Jesse Ehrenzweig, Jiri Pechanec, Mario Fiore Vitale, Ronak Jain, Thomas Thornton, Tommy Karlsson, and Vojtech Juranek!

    What’s next?

    With Debezium 2.3 quickly approaching a Final release, Debezium 2.4 is just around the corner.

    Debezium 2.4 planning is underway, and we would love to have your feedback. Please reach out to us on the mailing list or our chat if you have any ideas or suggestions. The roadmap for Debezium 2.4 and beyond will be updated in conjunction with Debezium 2.3 final in the coming week.

    And until next time, stay cool as summer approaches!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/06/21/debezium-2-3-final-released/index.html b/blog/2023/06/21/debezium-2-3-final-released/index.html index a0d786dfba..3b1ae083f7 100644 --- a/blog/2023/06/21/debezium-2-3-final-released/index.html +++ b/blog/2023/06/21/debezium-2-3-final-released/index.html @@ -35,4 +35,4 @@ "connector.class": "io.debezium.connector.oracle.OracleConnector", "rac.nodes": "host1.domain.com:1521/ORCLSID1,host2.domain.com:1522/ORCLSID2", ... -}

    Other fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Debezium Server stops sending events to Google Cloud Pub/Sub DBZ-5175

    • Toasted varying character array and date array are not correcly processed DBZ-6122

    • Upgrade to Infinispan 14.0.11.Final to fix CVE-2022-45047 DBZ-6193

    • Introduce LogMiner query filtering modes DBZ-6254

    • Lock contention on LOG_MINING_FLUSH table when multiple connectors deployed DBZ-6256

    • Ensure that the connector can start from a stale timestamp more than one hour into the past DBZ-6307

    • The rs_id field is null in Oracle change event source information block DBZ-6329

    • Add JWT authentication to HTTP Client DBZ-6348

    • Using pg_replication_slot_advance which is not supported by PostgreSQL10. DBZ-6353

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    • Support multiple tasks when streaming shard list DBZ-6365

    • Code Improvements for skip.messages.without.change DBZ-6366

    • Kinesis Sink - AWS Credentials Provider DBZ-6372

    • Toasted hstore are not correctly processed DBZ-6379

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • __source_ts_ms r (read) operation date is set to future for SQL Server DBZ-6388

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • Date and Time values without timezones are not persisted correctly based on database.time_zone DBZ-6399

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • Snapshot step 5 - Reading structure of captured tables time too long DBZ-6439

    • NPE on read-only MySQL connector start up DBZ-6440

    • Oracle parallel snapshots do not properly set PDB context when using multitenancy DBZ-6457

    • [MariaDB] Add support for userstat plugin keywords DBZ-6459

    • Debezium Server cannot recover from Google Pub/Sub errors DBZ-6461

    • "Ignoring invalid task provided offset" DBZ-6463

    • Oracle snapshot.include.collection.list should be prefixed with databaseName in documentation. DBZ-6474

    • Db2 connector can fail with NPE on notification sending DBZ-6485

    • ExtractNewRecordState SMT in combination with HeaderToValue SMT results in Unexpected field name exception DBZ-6486

    • BigDecimal fails when queue memory size limit is in place DBZ-6490

    • Allow schema to be specified in the Debezium Sink Connector configuration DBZ-6491

    • ORACLE table can not be captured, got runtime.NoViableAltException DBZ-6492

    • Signal poll interval has incorrect default value DBZ-6496

    • Oracle JDBC driver 23.x throws ORA-18716 - not in any time zone DBZ-6502

    • Alpine postgres images should use llvm/clang 15 explicitly DBZ-6506

    • FileSignalChannel is not loaded DBZ-6509

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Error value of negative seconds in convertOracleIntervalDaySecond DBZ-6513

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • Only use error processing mode on certain errors DBZ-6523

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • Oracle Connector: Snapshot fails with specific combination DBZ-6528

    • Use better hashing function for PartitionRouting DBZ-6529

    • Table order is incorrect on snapshots DBZ-6533

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    • Unhandled NullPointerException in PartitionRouting will crash the whole connect plugin DBZ-6543

    • Bug in field.name.adjustment.mode Property DBZ-6559

    • Operator sets incorrect value of transformation.predicate when no predicate is specified DBZ-6560

    • Upgrade MySQL JDBC driver to 8.0.33 DBZ-6563

    • Upgrade Google Cloud BOM to 26.17.0 DBZ-6570

    • Kubernetes-Config extension interferes with SSL tests due to k8 devservice starting up DBZ-6574

    • MySQL read-only connector with Kafka signals enabled fails on start up DBZ-6579

    • Redis schema history can fail upon startup DBZ-6580

    What’s next?

With Debezium 2.3 out, our major focus will be coordinating bugfixes for any Debezium 2.3 reports, while working primarily on the upcoming preview release of Debezium 2.4.

Debezium 2.4 is planned with a ton of changes, and we intend to tackle them in a bucketed fashion, with each bucket assigned a priority. As with any schedule, priorities are subject to change, but the following is an outline of what to expect:

    Priority 1
    • [Core] TimescaleDB single message transformation support

    • [Core] Timezone single message transformation to ease usages with Debezium temporal types

    • [Core] Initial snapshot notifications

    • [MongoDB] Database-wide change stream support

    • [MongoDB] Multi-task deployment metrics support

    • [Oracle] OpenLogReplicator adapter support

    • [Oracle] XML, LONG, RAW, and LONG RAW data type support

    • [Universal] Exactly-Once semantics support for other connectors

    • [Dependencies] Apache Kafka 3.5.x support

    Priority 2
    • [Operator] Next steps for Debezium operator

    • [Core] Ad-hoc blocking snapshot

    • [Dependencies] Use OpenTelemetry

    Priority 3
    • [Embedded Engine] Parallelization support

    • [MongoDB] Parallel incremental snapshots support

    • [MySQL] Parallel schema snapshots support

This is not an exhaustive list, and it’s quite ambitious, but given the shortened Debezium 2.3 cycle, we hope the extra few weeks on Debezium 2.4 will make this next minor release possible with all these features and much more. The roadmap will be updated this week to align with the above for Debezium 2.4 and beyond, so please be sure to get in touch with us on the mailing list or in our chat if you have any ideas or suggestions.

Otherwise, with summer in full swing for those of us in the north, I expect holidays and much-deserved time off will be the norm for the next several months. To those who travel or intend to take some time for yourselves and your families, enjoy and be safe!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    Other fixes

There were quite a number of bugfixes and stability changes in this release; some noteworthy ones are:

    • Debezium Server stops sending events to Google Cloud Pub/Sub DBZ-5175

    • Toasted varying character array and date array are not correcly processed DBZ-6122

    • Upgrade to Infinispan 14.0.11.Final to fix CVE-2022-45047 DBZ-6193

    • Introduce LogMiner query filtering modes DBZ-6254

    • Lock contention on LOG_MINING_FLUSH table when multiple connectors deployed DBZ-6256

    • Ensure that the connector can start from a stale timestamp more than one hour into the past DBZ-6307

    • The rs_id field is null in Oracle change event source information block DBZ-6329

    • Add JWT authentication to HTTP Client DBZ-6348

    • Using pg_replication_slot_advance which is not supported by PostgreSQL10. DBZ-6353

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    • Support multiple tasks when streaming shard list DBZ-6365

    • Code Improvements for skip.messages.without.change DBZ-6366

    • Kinesis Sink - AWS Credentials Provider DBZ-6372

    • Toasted hstore are not correctly processed DBZ-6379

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • __source_ts_ms r (read) operation date is set to future for SQL Server DBZ-6388

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • Date and Time values without timezones are not persisted correctly based on database.time_zone DBZ-6399

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • Snapshot step 5 - Reading structure of captured tables time too long DBZ-6439

    • NPE on read-only MySQL connector start up DBZ-6440

    • Oracle parallel snapshots do not properly set PDB context when using multitenancy DBZ-6457

    • [MariaDB] Add support for userstat plugin keywords DBZ-6459

    • Debezium Server cannot recover from Google Pub/Sub errors DBZ-6461

    • "Ignoring invalid task provided offset" DBZ-6463

    • Oracle snapshot.include.collection.list should be prefixed with databaseName in documentation. DBZ-6474

    • Db2 connector can fail with NPE on notification sending DBZ-6485

    • ExtractNewRecordState SMT in combination with HeaderToValue SMT results in Unexpected field name exception DBZ-6486

    • BigDecimal fails when queue memory size limit is in place DBZ-6490

    • Allow schema to be specified in the Debezium Sink Connector configuration DBZ-6491

    • ORACLE table can not be captured, got runtime.NoViableAltException DBZ-6492

    • Signal poll interval has incorrect default value DBZ-6496

    • Oracle JDBC driver 23.x throws ORA-18716 - not in any time zone DBZ-6502

    • Alpine postgres images should use llvm/clang 15 explicitly DBZ-6506

    • FileSignalChannel is not loaded DBZ-6509

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Error value of negative seconds in convertOracleIntervalDaySecond DBZ-6513

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • Only use error processing mode on certain errors DBZ-6523

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • Oracle Connector: Snapshot fails with specific combination DBZ-6528

    • Use better hashing function for PartitionRouting DBZ-6529

    • Table order is incorrect on snapshots DBZ-6533

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    • Unhandled NullPointerException in PartitionRouting will crash the whole connect plugin DBZ-6543

    • Bug in field.name.adjustment.mode Property DBZ-6559

    • Operator sets incorrect value of transformation.predicate when no predicate is specified DBZ-6560

    • Upgrade MySQL JDBC driver to 8.0.33 DBZ-6563

    • Upgrade Google Cloud BOM to 26.17.0 DBZ-6570

    • Kubernetes-Config extension interferes with SSL tests due to k8 devservice starting up DBZ-6574

    • MySQL read-only connector with Kafka signals enabled fails on start up DBZ-6579

    • Redis schema history can fail upon startup DBZ-6580

    What’s next?

With Debezium 2.3 out, our major focus will be coordinating bugfixes for any Debezium 2.3 reports, while working primarily on the upcoming preview release of Debezium 2.4.

Debezium 2.4 is planned with a ton of changes, and we intend to tackle them in a bucketed fashion, with each bucket assigned a priority. As with any schedule, priorities are subject to change, but the following is an outline of what to expect:

    Priority 1
    • [Core] TimescaleDB single message transformation support

    • [Core] Timezone single message transformation to ease usages with Debezium temporal types

    • [Core] Initial snapshot notifications

    • [MongoDB] Database-wide change stream support

    • [MongoDB] Multi-task deployment metrics support

    • [Oracle] OpenLogReplicator adapter support

    • [Oracle] XML, LONG, RAW, and LONG RAW data type support

    • [Universal] Exactly-Once semantics support for other connectors

    • [Dependencies] Apache Kafka 3.5.x support

    Priority 2
    • [Operator] Next steps for Debezium operator

    • [Core] Ad-hoc blocking snapshot

    • [Dependencies] Use OpenTelemetry

    Priority 3
    • [Embedded Engine] Parallelization support

    • [MongoDB] Parallel incremental snapshots support

    • [MySQL] Parallel schema snapshots support

This is not an exhaustive list, and it’s quite ambitious, but given the shortened Debezium 2.3 cycle, we hope the extra few weeks on Debezium 2.4 will make this next minor release possible with all these features and much more. The roadmap will be updated this week to align with the above for Debezium 2.4 and beyond, so please be sure to get in touch with us on the mailing list or in our chat if you have any ideas or suggestions.

Otherwise, with summer in full swing for those of us in the north, I expect holidays and much-deserved time off will be the norm for the next several months. To those who travel or intend to take some time for yourselves and your families, enjoy and be safe!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/06/22/towards-exactly-once-delivery/index.html b/blog/2023/06/22/towards-exactly-once-delivery/index.html index 3862603ffa..c4b625596e 100644 --- a/blog/2023/06/22/towards-exactly-once-delivery/index.html +++ b/blog/2023/06/22/towards-exactly-once-delivery/index.html @@ -122,4 +122,4 @@ WARN: skipping None value, items: 48345 WARN: cannot deserialize record b'\x00\x00\x00\x00\x00\x00' WARN: skipping None value, items: 49999 -Found 0 duplicates in 49999 items (unique values: 49999, skipped values: 54)

In this case there are no duplicate records, so everything looks good. The only question is what those skipped events are. They are transaction boundary markers. The Python Kafka client for some reason cannot cope with them and fails to deserialize them, so we skip them. The Java client should recognize these records and handle them without any issue.

Summary and next steps

In this blog post we have shown how to configure exactly-once semantics for Kafka Connect source connectors and how to use it with the Debezium Postgres connector. So far, it seems that there are no issues, and at least the Debezium Postgres connector works fine with exactly-once semantics.

However, not finding an issue of course doesn’t imply that there are no issues. Therefore, as a next step we would like to develop a more rigorous test framework for testing data consistency and exactly-once delivery. We would like to write the tests using the famous Jepsen framework. If we succeed in writing the tests, we will share the results in a follow-up blog post. In the meantime, we encourage you to test exactly-once delivery in your own environments and deployments to increase the chance of discovering any potential bugs. If you run any such test, we would very much appreciate it if you shared the results with us, negative ones when you find a bug as well as positive ones when everything passes.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +Found 0 duplicates in 49999 items (unique values: 49999, skipped values: 54)

In this case there are no duplicate records, so everything looks good. The only question is what those skipped events are. They are transaction boundary markers. The Python Kafka client for some reason cannot cope with them and fails to deserialize them, so we skip them. The Java client should recognize these records and handle them without any issue.

Summary and next steps

In this blog post we have shown how to configure exactly-once semantics for Kafka Connect source connectors and how to use it with the Debezium Postgres connector. So far, it seems that there are no issues, and at least the Debezium Postgres connector works fine with exactly-once semantics.
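As a quick recap, the sketch below shows the relevant settings, expressed here as Java properties purely for illustration; in a real deployment the first block belongs in the Kafka Connect worker configuration and the second in the connector configuration, and the exact property values should be checked against the Kafka Connect documentation for your version (exactly-once source support requires Kafka 3.3 or newer). The connector name is a hypothetical placeholder.

// A minimal recap sketch of the exactly-once related settings, assuming Kafka 3.3+.
import java.util.Properties;

public class ExactlyOnceConfigSketch {
    public static void main(String[] args) {
        Properties worker = new Properties();
        // Enables exactly-once support for source connectors on the Connect cluster
        worker.setProperty("exactly.once.source.support", "enabled");

        Properties connector = new Properties();
        connector.setProperty("name", "inventory-connector");  // hypothetical connector name
        connector.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        // Ask Connect to fail the connector if exactly-once delivery cannot be guaranteed
        connector.setProperty("exactly.once.support", "required");

        worker.forEach((k, v) -> System.out.println("worker: " + k + "=" + v));
        connector.forEach((k, v) -> System.out.println("connector: " + k + "=" + v));
    }
}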

However, not finding an issue of course doesn’t imply that there are no issues. Therefore, as a next step we would like to develop a more rigorous test framework for testing data consistency and exactly-once delivery. We would like to write the tests using the famous Jepsen framework. If we succeed in writing the tests, we will share the results in a follow-up blog post. In the meantime, we encourage you to test exactly-once delivery in your own environments and deployments to increase the chance of discovering any potential bugs. If you run any such test, we would very much appreciate it if you shared the results with us, negative ones when you find a bug as well as positive ones when everything passes.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/06/27/Debezium-signaling-and-notifications/index.html b/blog/2023/06/27/Debezium-signaling-and-notifications/index.html index 584be6e37b..271d04197e 100644 --- a/blog/2023/06/27/Debezium-signaling-and-notifications/index.html +++ b/blog/2023/06/27/Debezium-signaling-and-notifications/index.html @@ -1 +1 @@ - Debezium signaling and notifications - Part 1

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    In today’s interconnected software applications and systems, seamlessly integrating with other products is crucial for building robust and efficient solutions. One of the key challenges in building integrated software ecosystems is establishing effective communication channels between different components and services.

    Debezium introduces a comprehensive signaling and notification system providing seamless integration with any third-party solution. Combining the signal and notification systems, you can now orchestrate complex yet efficient pipelines to stay informed about the status and progress of the state managed by Debezium.

    The new signaling system has been re-designed to be extensible, providing a simplified approach to adding new, custom signal channels on top of the various implementations available. Let’s explore the different signals and notification channels, covering their functionality and describing their benefits.

    Signaling in Debezium

Signaling in Debezium refers to the mechanism through which users can trigger specific actions within the product. It allows users to interact with Debezium and control certain behaviors. One notable use case of signaling is the execution of incremental snapshots, which permits starting an ad-hoc snapshot of a database.

    Debezium provides different signaling channels through which users can send signals to the platform. Let’s explore the available signaling channels:

    Signaling through Database Tables

    In earlier versions of Debezium, signaling required a dedicated database table where you would insert specific records to trigger Debezium to execute a task. For example, an inserted row could initiate an incremental snapshot, prompting the connector to run a snapshot operation based on the given signal criteria.

    Signaling through Kafka Topics

    In earlier versions of Debezium, you could send signals via a configured Kafka topic; however, this was only available for MySQL using read-only access and global transaction identifiers (GTID) behavior. With Debezium 2.3, the Kafka signal channel is available to all connectors.

    This enhancement provides a simplified integration approach and a unified and consistent approach for signals across all supported Debezium databases. You can send signals to a specific Kafka topic, and Debezium will consume and process that signal as though it originated from the signal table itself.

    Using a Kafka topic for signals provides several advantages. First, it aligns with event-driven design, making it a natural fit with change data capture and Debezium. Additionally, it provides a secure way to send signals to Debezium without necessarily providing the user with direct access to the underlying database.

    Even when using the Kafka signal approach, the incremental snapshot feature still requires the presence and use of the signaling table to manage some bookkeeping needed for the incremental snapshot process. You can only omit the signal table when using MySQL in a read-only way with global transaction identifiers (GTIDs) enabled.

    Signaling through File

You can trigger signals from the file system thanks to a contribution by the Debezium user community. This approach is an excellent alternative when you are not relying on messaging infrastructure like Kafka, for example when using Debezium Server or the embedded engine, or when you cannot use the database for signals.

    Signaling through Java Management Extensions (JMX)

With this channel, you can send signals by calling the signal operation exposed through a dedicated MBean. You do this by connecting to the MBeanServer with your preferred client. A dedicated post about this feature will follow.

    Signaling through custom channel

We also redesigned the signaling mechanism to be extensible. You can provide your own implementations and enable them via configuration. A dedicated post about this feature will follow.

Notification in Debezium

Notifications focus on conveying status and progress information about Debezium and its internal processes. They provide insights into the execution and completion of tasks such as initial snapshots or incremental snapshots. Debezium generates notifications to keep users informed about the progress of these tasks, facilitating monitoring and decision-making based on real-time information. By leveraging the notification functionality, users can track the state of Debezium and take appropriate actions based on the received notifications.

    Debezium provides a variety of ways to obtain notifications. Let’s explore the out-of-the-box channels:

    Notification through Sink channel

This implementation sends notifications through the Connect API to a configured topic. Users can enable this channel and specify the topic name. By publishing notifications to the specified topic, Debezium allows users to consume and process notifications in a way that suits their needs. Since the sink is agnostic (i.e., not necessarily Kafka), this integrates seamlessly with Debezium Server to deliver notifications to any of its sink adapters.

    Notification through logs

    This channel appends notifications directly to the end of the connector’s log. This approach provides convenient access for monitoring, debugging, and analyzing notification details.

    Notifications through Java Management Extensions (JMX)

This notification channel exposes a dedicated Debezium MBean with several attributes that contain the list of generated notifications. It lets you use common, industry-standard JMX-based monitoring tooling to consume and react to Debezium notifications. A dedicated post will follow to discuss this in more detail.

    Notification through custom channel

    Debezium’s notification mechanism is extensible, allowing users to implement custom channels to deliver notifications using means that best fit their needs. We will cover how to provide a custom notification channel in a dedicated post.

    Conclusion

Signals and Notifications are now foundational features in Debezium. Signaling empowers users to interact with Debezium and trigger actions, while notifications provide valuable information about Debezium’s state and progress.

    In previous versions of Debezium, initiating an incremental snapshot was only possible using a signaling table. Users had to configure a specific table as a signaling mechanism to trigger the incremental snapshot. However, for MySQL with GTIDs enabled, it was possible to utilize Kafka for signaling purposes.

    With the release of Debezium version 2.3, we have introduced significant improvements to both the signal and notification subsystems. We’ve unified several behaviors across connectors and made the entire system extensible, aiming to simplify both custom and future contributed implementations. We hope this enhances the overall experience of working with Debezium and provides a way to integrate Debezium with other third-party applications and tools seamlessly. These new and improved features allow you to maximize the capabilities of the Debezium change data capture platform in limitless ways.

    Stay tuned for Part 2, where we will discuss how to customize signaling and notification channels in Debezium.

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium signaling and notifications - Part 1

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    In today’s interconnected software applications and systems, seamlessly integrating with other products is crucial for building robust and efficient solutions. One of the key challenges in building integrated software ecosystems is establishing effective communication channels between different components and services.

    Debezium introduces a comprehensive signaling and notification system providing seamless integration with any third-party solution. Combining the signal and notification systems, you can now orchestrate complex yet efficient pipelines to stay informed about the status and progress of the state managed by Debezium.

    The new signaling system has been re-designed to be extensible, providing a simplified approach to adding new, custom signal channels on top of the various implementations available. Let’s explore the different signals and notification channels, covering their functionality and describing their benefits.

    Signaling in Debezium

Signaling in Debezium refers to the mechanism through which users can trigger specific actions within the product. It allows users to interact with Debezium and control certain behaviors. One notable use case of signaling is the execution of incremental snapshots, which permits starting an ad-hoc snapshot of a database.

    Debezium provides different signaling channels through which users can send signals to the platform. Let’s explore the available signaling channels:

    Signaling through Database Tables

    In earlier versions of Debezium, signaling required a dedicated database table where you would insert specific records to trigger Debezium to execute a task. For example, an inserted row could initiate an incremental snapshot, prompting the connector to run a snapshot operation based on the given signal criteria.
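As an illustration, assuming a signal table named debezium_signal with the documented id, type, and data columns, triggering an ad-hoc incremental snapshot from Java could look like the following sketch; the table name, connection details, and captured table are hypothetical placeholders.

// A minimal sketch of inserting a signal row via JDBC. The three columns mirror
// the signal structure (id, type, data); all names and credentials are hypothetical.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class SignalTableInsert {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
             PreparedStatement ps = conn.prepareStatement(
                "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            ps.setString(1, "ad-hoc-1");          // unique signal identifier
            ps.setString(2, "execute-snapshot");  // signal type
            ps.setString(3, "{\"data-collections\": [\"public.orders\"], \"type\": \"incremental\"}");
            ps.executeUpdate();
        }
    }
}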

    Signaling through Kafka Topics

    In earlier versions of Debezium, you could send signals via a configured Kafka topic; however, this was only available for MySQL using read-only access and global transaction identifiers (GTID) behavior. With Debezium 2.3, the Kafka signal channel is available to all connectors.

    This enhancement provides a simplified integration approach and a unified and consistent approach for signals across all supported Debezium databases. You can send signals to a specific Kafka topic, and Debezium will consume and process that signal as though it originated from the signal table itself.

    Using a Kafka topic for signals provides several advantages. First, it aligns with event-driven design, making it a natural fit with change data capture and Debezium. Additionally, it provides a secure way to send signals to Debezium without necessarily providing the user with direct access to the underlying database.

    Even when using the Kafka signal approach, the incremental snapshot feature still requires the presence and use of the signaling table to manage some bookkeeping needed for the incremental snapshot process. You can only omit the signal table when using MySQL in a read-only way with global transaction identifiers (GTIDs) enabled.
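As a sketch of what sending a Kafka signal can look like in practice, the following produces an execute-snapshot signal using the plain Kafka producer API; the topic name, broker address, and topic-prefix key ("inventory") are placeholders, and the exact message layout should be verified against the Debezium signaling documentation.

// A minimal sketch of sending a signal to a Kafka signal topic; names are hypothetical.
// The record key is expected to match the connector's topic prefix.
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaSignalSender {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            String value = "{\"type\": \"execute-snapshot\", "
                    + "\"data\": {\"data-collections\": [\"public.orders\"], \"type\": \"incremental\"}}";
            producer.send(new ProducerRecord<>("dbz-signals", "inventory", value));
            producer.flush();
        }
    }
}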

    Signaling through File

You can trigger signals from the file system thanks to a contribution by the Debezium user community. This approach is an excellent alternative when you are not relying on messaging infrastructure like Kafka, for example when using Debezium Server or the embedded engine, or when you cannot use the database for signals.

    Signaling through Java Management Extensions (JMX)

With this channel, you can send signals by calling the signal operation exposed through a dedicated MBean. You do this by connecting to the MBeanServer with your preferred client. A dedicated post about this feature will follow.

    Signaling through custom channel

We also redesigned the signaling mechanism to be extensible. You can provide your own implementations and enable them via configuration. A dedicated post about this feature will follow.

Notification in Debezium

Notifications focus on conveying status and progress information about Debezium and its internal processes. They provide insights into the execution and completion of tasks such as initial snapshots or incremental snapshots. Debezium generates notifications to keep users informed about the progress of these tasks, facilitating monitoring and decision-making based on real-time information. By leveraging the notification functionality, users can track the state of Debezium and take appropriate actions based on the received notifications.

    Debezium provides a variety of ways to obtain notifications. Let’s explore the out-of-the-box channels:

    Notification through Sink channel

This implementation sends notifications through the Connect API to a configured topic. Users can enable this channel and specify the topic name. By publishing notifications to the specified topic, Debezium allows users to consume and process notifications in a way that suits their needs. Since the sink is agnostic (i.e., not necessarily Kafka), this integrates seamlessly with Debezium Server to deliver notifications to any of its sink adapters.

    Notification through logs

    This channel appends notifications directly to the end of the connector’s log. This approach provides convenient access for monitoring, debugging, and analyzing notification details.

    Notifications through Java Management Extensions (JMX)

This notification channel exposes a dedicated Debezium MBean with several attributes that contain the list of generated notifications. It lets you use common, industry-standard JMX-based monitoring tooling to consume and react to Debezium notifications. A dedicated post will follow to discuss this in more detail.

    Notification through custom channel

    Debezium’s notification mechanism is extensible, allowing users to implement custom channels to deliver notifications using means that best fit their needs. We will cover how to provide a custom notification channel in a dedicated post.

    Conclusion

Signals and Notifications are now foundational features in Debezium. Signaling empowers users to interact with Debezium and trigger actions, while notifications provide valuable information about Debezium’s state and progress.

    In previous versions of Debezium, initiating an incremental snapshot was only possible using a signaling table. Users had to configure a specific table as a signaling mechanism to trigger the incremental snapshot. However, for MySQL with GTIDs enabled, it was possible to utilize Kafka for signaling purposes.

    With the release of Debezium version 2.3, we have introduced significant improvements to both the signal and notification subsystems. We’ve unified several behaviors across connectors and made the entire system extensible, aiming to simplify both custom and future contributed implementations. We hope this enhances the overall experience of working with Debezium and provides a way to integrate Debezium with other third-party applications and tools seamlessly. These new and improved features allow you to maximize the capabilities of the Debezium change data capture platform in limitless ways.

    Stay tuned for Part 2, where we will discuss how to customize signaling and notification channels in Debezium.

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/06/29/debezium-oracle-series-part-3/index.html b/blog/2023/06/29/debezium-oracle-series-part-3/index.html index 8bdb309abb..52b96859c7 100644 --- a/blog/2023/06/29/debezium-oracle-series-part-3/index.html +++ b/blog/2023/06/29/debezium-oracle-series-part-3/index.html @@ -1,4 +1,4 @@ Debezium for Oracle - Part 3: Performance and Debugging

This post is the final part of a 3-part series exploring the use of Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed them, the first installment of this series can be found here and the second installment here.

In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on connector performance and debugging.

    What to know before we start

    This installment is going to focus on a number of highly technical Oracle database features, and these often are within the realm of an Oracle database administrator’s area of expertise. Depending on your permissions and roles within your environment, you may not necessarily have access to all the tools and commands that will be discussed herein, so you may need to coordinate with your database administrator if you are working in a non-local environment.

Additionally, this post picks up where we left off in Part 2. We had previously deployed a full Kafka ecosystem with Zookeeper, a Kafka broker, and a Kafka Connect environment. These are not strictly a requirement for this series, but if you would like to follow along manually, I encourage you to follow the steps in the earlier parts of the series to get started quickly.

    What is Oracle and why is it complex?

    I touched on the first part of this briefly in Part 1, but I believe it bears a much deeper answer to support some of the technical concepts we’re going to discuss today.

Newcomers or those unfamiliar with Oracle often ask, "Why is everything with Oracle always complex?". Most often, these users either have little Oracle database experience or have exposure to other open source database solutions that, in broad terms, are simpler to use, particularly out of the box. So why is that?

Oracle was first released to the public over four decades ago, in 1979, making it the oldest and the most popular database in the world. Among the top five most popular databases, Microsoft SQL Server is the next oldest, released three decades ago in 1989, with the others all being half the age of Oracle or less.

What has helped drive Oracle’s market share is its ability to innovate quickly, retain compatibility for existing database platform users, and remain flexible enough to provide features now that you may not need until later. This has allowed Oracle to thrive unlike its competition, but we all know that flexibility often comes at a cost, and traditionally that cost has been ease of use. Oracle puts a broad and extensive arsenal of tools at your disposal, but these are often tightly coupled, leading to complex installations and configurations. The advantage, beyond its flexibility, is that it’s the best at what it does, so the trade-off is often worth it.

As we go through the following parts of this installment, I hope this context provides a newfound perspective. While it may be commonplace to view many of Oracle’s oddities, compared to its competition, as pain points, in reality they’re strengths that have kept Oracle at the forefront of a critical space in the age of big data.

    Performance

When choosing to use change data capture (CDC) pipelines, low latency is very often a driving factor. Whether you are using CDC to provide event-driven communication between microservices or complex data replication, it’s important that events arrive as soon as possible, so throughput and performance are often at the forefront of evaluating a solution’s merit, immediately after reliability.

    In this section, we’re going to cover several configuration properties of the Debezium Oracle connector that can allow you to optimize the connector’s performance based on your environment and needs.

    Disk IO

A fundamental aspect of the Debezium Oracle connector’s implementation is that it uses Oracle APIs to work with the LogMiner process to read the redo and archive logs. These logs are managed by two Oracle processes, the Oracle LogWriter (LGWR) and the Oracle Archiver (ARCH). The details of these two processes are less important to this discussion, other than the fact that they are responsible for managing the current redo logs used by the Oracle database and creating the archive logs that contain the past historical changes made to the database.

    The Debezium Oracle connector uses the LogMiner API to read these redo and archive logs off disk and generate change events. Unfortunately, redo and archive logs cannot be retained on the database server indefinitely. Often, logs can be several gigabytes in size, and when combined with supplemental log configurations, an Oracle database can generate many logs in short periods of time, thus disk space gets consumed quickly. When these redo or archive logs exist on a networked drive or a high-latency device, this has a direct impact on LogMiner’s ability to read and provide the change event data to the connector as quickly as possible.

    One possible way to improve performance is to coordinate with your Oracle database administrator to see if you can retain archive logs for longer, even if it’s on a separately mounted disk that is local to the database machine. Oracle provides the ability to define what are called log archive destinations, supporting up to a maximum of 31 different paths where archive logs can be written by the Archiver process.

    It’s not uncommon for your database administrator to have already configured several log archive destinations for other processes such as GoldenGate, DataGuard, etc. If such a destination has been defined and its log retention policy aligns with the connector’s ingestion rate, you can safely point Debezium at it. If no suitable destination exists, you can create a new one following the aforementioned Oracle documentation.
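    If you are unsure which destinations are already in place, a query along the following lines can show them. This is a hedged sketch that assumes you have read access to the V$ARCHIVE_DEST view:

    -- List the configured log archive destinations and their current status
    SELECT dest_name, destination, status
    FROM   v$archive_dest
    WHERE  destination IS NOT NULL;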

    To have Debezium use a specific log archive destination, the log.mining.archive.destination.name connector property must be provided.

    An example using LOG_ARCHIVE_DEST_5
    {
       "log.mining.archive.destination.name": "LOG_ARCHIVE_DEST_5",
       ...
    }

    The physical path to the logs is obtained from Oracle directly; you only need to specify the destination name that the database administrator configured.

    This configuration is only applicable for use with the Oracle LogMiner adapter and has no impact when ingesting changes using Oracle XStream.

    Redo Log Size

    Oracle’s redo logs are used primarily for recovery in the event of an instance failure. When an instance is first created, the administrator provides a starting size for the redo logs. If the redo logs are sized too small or even too large, this can have a direct impact on the performance of your instance.

    For example, the size of the redo logs has a direct impact on how frequently the Oracle Archiver (ARCH) process transitions redo logs to archive logs, which is referred to as a log switch. Generally, Oracle recommends that administrators minimize the number of log switches within small windows, but this can vary depending on a number of factors such as volume or logging configuration.

    A log switch is a fairly expensive operation because it’s the moment in time when a redo log is copied by the Archiver process to an archive log and a new redo log is allocated. If there is ever a period when the Archiver process falls behind and all redo logs have filled, Oracle’s database can degrade or even halt if a checkpoint cannot occur because all current redo logs are full and awaiting archival.

    If you are using an image of Oracle based on Oracle Docker Images, you will have noticed that by default the redo logs created are extremely small, several megabytes each. For development purposes this is fine out of the box, but when using such an instance for any type of serious integration such as Debezium, this simply doesn’t work well, especially with the default mining strategy, which we’ll discuss in more detail in the next section.

    However, small redo log sizes aren’t the only problem. If the redo log files are sized too large, this can have an adverse impact on the read time from disk, making the gap while the connector waits for changes even longer, as more disk IO is needed to read the larger files.

    Resizing Oracle’s redo logs requires knowledge of the database server paths and where it is safe to store those files; since that information is environment dependent, we aren’t going to cover how to do this directly here. Oracle provides excellent documentation on how to perform this task.
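    Before changing anything, it can help to look at how the redo logs are currently sized. The following query is a hedged sketch that assumes read access to the V$LOG view and reports each log group with its size in megabytes:

    -- Show each redo log group, its size in MB, its member count, and its status
    SELECT group#, bytes / 1024 / 1024 AS size_mb, members, status
    FROM   v$log
    ORDER  BY group#;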

    Unfortunately, there isn’t a simple answer for what size you should use. This requires a bit of finesse, science, and heuristics of your environment to gauge what is the best choice, but this is something in your arsenal that could be adjusted if necessary.

    Log Mining Strategy

    In Part 2, we covered two log mining strategies for the Debezium Oracle connector. These strategies control how the connector interacts with Oracle LogMiner and how specific entries from the redo logs are ingested for both schema and table changes.

    Redo logs store redo entries, and not all redo entries explicitly store every piece of data needed to reconstruct the change that occurred. For example, DML operations (inserts, updates, deletes) do not refer to table or column names but rather to object identifiers. These object identifier and version details change in the data dictionary as column or table modifications (DDL changes) occur. This means that the identifier and/or its version will differ between redo entries for the same table before and after a schema change.

    The log mining strategy controls precisely how redo entries are interpreted by Oracle LogMiner, primarily by either writing the data dictionary to the end of the redo logs or omitting this step. There are benefits to using either strategy and we’re going to dive into what those are and why you may use one strategy over another.

    Default Mining Strategy

    The default mining strategy is the safest choice, but it is also the most expensive. This strategy will append a copy of the data dictionary to the redo logs when a log switch is observed.

    This strategy’s main benefit is that schema and data changes are ingested seamlessly by Oracle LogMiner. In other words, if an INSERT is followed by an ALTER TABLE and that is followed by an UPDATE, Oracle LogMiner will safely deduce the right table and column names from the old and new object ids and versions. This means that Debezium will be able to safely ingest that change event as one might expect.

    The unfortunate pain point of this strategy is that it’s an expensive step at each log switch.

    First, it requires that the connector periodically append a copy of the data dictionary to the redo logs, and Oracle performs a full log switch (all log groups switch) after writing the dictionary. This means that more archive logs will be generated than when using the online catalog strategy we’ll discuss momentarily.

    Secondly, when a LogMiner process begins to mine the redo logs, it must first read and prepare a section of the SGA with all the dictionary metadata so that table and column names can be resolved properly. Depending on the size of the redo logs, and more specifically the dictionary segment of the logs, this can take upwards of several minutes to prepare. So you can probably guess that combining this strategy with poorly sized redo logs can easily create a performance bottleneck.

    It is not recommended to ever deploy multiple Oracle connectors using this strategy; instead, use a single Oracle connector.

    Online Catalog Strategy

    The online catalog mining strategy is used when specifying the log.mining.strategy connector configuration property with the value online_catalog. Unlike the default mining strategy, this strategy does not write any additional data to the redo logs, but instead relies on the current data dictionary to resolve table and column names.
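    As a minimal illustration, the relevant fragment of the connector configuration would look like the following, with all other required connection properties omitted:

    {
       "log.mining.strategy": "online_catalog",
       ...
    }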

    The benefit of this strategy is that since we are not writing any dictionary details to the redo logs, redo logs only transition to archive logs based on existing database activity. In short, Debezium will not influence this frequency beyond the required supplemental logging configuration, making it easier to manage the volume of archive logs created. Secondly, because no dictionary details are written to the logs and the number of log switches remains consistent with existing behavior, a mining session starts nearly instantaneously; there is no need for LogMiner to prepare any dictionary metadata, as the existing data dictionary satisfies that requirement as-is.

    Unfortunately, this strategy does have a single restriction: schema changes are not observed seamlessly. In other words, if a redo entry refers to an object id/version that does not match the object id/version in the online data dictionary, Oracle LogMiner is incapable of reconstructing the SQL for that operation.

    Schema changes can still be handled with this strategy, but they must be applied in a lock-step fashion. In other words, you would halt changes on the table, wait until the last change for the table has been captured by Debezium, apply the schema change, wait for the schema change to be emitted by Debezium, and finally resume making changes to the data in the table.

    This strategy provides the optimal performance gain both for Oracle and the connector.

    The only requirement is that if a table’s schema isn’t static and changes periodically, you perform those schema changes in lock-step as described above; otherwise, schema changes should be avoided on the table(s) being captured.

    Finally, this strategy should be used if deploying multiple Oracle connectors on the same Oracle database.

    In conclusion, the mining strategy chosen can have significant impacts on the performance of the database as well as the ingestion rate of the Debezium Oracle connector. It’s important to weigh the benefits and consequences of this choice based on what is possible given your environment.

    There is an effort underway to bridge these two strategies and deliver a solution that provides all the performance benefits of the online catalog strategy and the seamless schema management provided by the default mining strategy. The progress for this effort can be found in DBZ-3401.

    Batch size

    The Debezium Oracle connector uses an adaptive batch size algorithm to determine the number of rows that will be fetched per database call. The algorithm is controlled by the following configuration properties:

    log.mining.batch.size.default

    This specifies the default number of rows that will be fetched per database call.

    log.mining.batch.size.min

    This specifies the minimum number of database rows that will be fetched per database call.

    log.mining.batch.size.max

    This specifies the maximum number of database rows that will be fetched per database call.

    These settings give the connector the ability to read more data and reduce network latency when it has fallen behind or has observed a large transaction in the logs, at the expense of temporarily consuming more SGA and JVM memory, and to use less SGA and JVM memory when it has caught up to near real-time changes.

    The connector defaults for these are great starting points, but depending on your change event volume, it may be wise to increase or even shrink these settings based on your environment to improve performance.
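    As an illustration only, a configuration that widens the adaptive range might look like the fragment below. The values shown are hypothetical and should be tuned against your own change volume rather than copied verbatim:

    {
       "log.mining.batch.size.min": "1000",
       "log.mining.batch.size.default": "20000",
       "log.mining.batch.size.max": "100000",
       ...
    }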

    Query filter mode

    Any developer who has ever worked on a SQL-based application will tell you that just because a query performs well in one environment or at one point in time doesn’t mean that the same query will be as efficient in another environment or in the future as the data set changes. That’s why in Debezium 2.3 we added a new feature called log.mining.query.filter.mode. Through discussions with Oracle community users across a variety of installations, volume sizes, and integrations, we concluded that the LogMiner query used by the Oracle connector simply cannot be a one-size-fits-all solution.

    In order to be most efficient, we needed to provide the user community with a way to tune the LogMiner query that best satisfies their configuration and environment. There are currently three options for how the LogMiner query is constructed, and each influences how the query’s where-clause is generated.

    none

    Specifies that no additional predicates are added to the LogMiner query.

    Instead, all filtering is delegated primarily to the Oracle connector’s Java runtime rather than to the database query. This has the highest network bandwidth usage of all the options and, depending on the volume and shape of the redo entries, can still have the highest throughput. For lower-volume installations this can easily be the fastest choice, but it does not scale well as the volume of redo entries increases or when the data set of interest is only a small portion of the total data set.

    in

    Specifies that the schema and table include/exclude filters are applied using a SQL in-clause.

    By default, the include/exclude configuration options support comma-separated lists of regular expressions; however, if you elect to avoid regular expressions, you can apply database-level filters to the LogMiner query more efficiently by using this query filter mode. An in-clause is much more efficient than using disjunctions or Oracle’s REGEXP_LIKE operator, which we’ll discuss with the next option. This mode also performs extremely well if you have a lot of schema or table include/exclude options defined in your configuration. And finally, because this choice performs database-level filtering, it reduces network latency and returns only the necessary rows to the connector.

    regex

    Specifies that the schema and table include/exclude filters are applied using the SQL operator REGEXP_LIKE.

    Since the include/exclude configuration options support comma-separated lists of regular expressions, this mode must be used instead of in when the filters are regular expressions. While this option performs database-level filtering much like the in-clause choice, the use of regular expressions degrades in performance as more include/exclude options are specified in the connector configuration. Therefore, in order to maximize performance, it’s generally best when using regular expressions to write as few expressions as possible, each matching as many tables or schemas as possible, to reduce the number of predicates appended to the query.

    As of Debezium 2.3, the default is none, so you can gain additional performance by explicitly configuring log.mining.query.filter.mode to use in or regex, depending on the values provided in your include/exclude list configuration properties.
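    For example, if your include lists contain plain schema and table names rather than regular expression patterns, a fragment such as the following (the schema and table names are hypothetical) pushes the filtering down to the database:

    {
       "log.mining.query.filter.mode": "in",
       "schema.include.list": "INVENTORY",
       "table.include.list": "INVENTORY.CUSTOMERS,INVENTORY.ORDERS",
       ...
    }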

    Debugging

    As much as I would like to think software engineering is all butterflies and flowers, it’s far from the truth, and managing an environment that runs software is no different. When problems occur, it’s important to have the knowledge to self-diagnose and get back to a running state as quickly as possible. So we’re going down the rabbit hole to discuss a number of common errors we’ve seen, how to debug them, and what the potential fixes might be.

    None of the log files contains offset SCN, re-snapshot is required

    I’m fairly certain that at some point most Oracle connector users have seen this error in the logs, whether during PoC design or testing, but hopefully not production. The error message itself is relatively clear, but what is often not clear is "why did this happen".

    For other databases, the transaction logs only contain committed changes, which are then consumed by Debezium. Unfortunately, Oracle does not do this and instead writes every single change to the transaction logs, even if the change is later rolled back due to a constraint violation or an explicit user or system rollback. This means that reading the changes from the Oracle redo and archive logs isn’t as simple as reading from position X until the end of the file and then repeating with the next log in sequence. Instead, the connector must maintain what we call a low and high watermark SCN; if you’re familiar with the connector’s offsets, these are represented as scn and commit_scn.

    The low watermark or scn represents the safe resume point in the redo logs. Generally this points to the position in the logs where the oldest in-progress transaction started. The high watermark or commit_scn represents the position in the logs where we last emitted a transaction batch for a given redo thread. This means that the changes in between these two values are a mixture of uncommitted changes, committed changes, or rolled back changes.

    When the connector starts, the low watermark or scn read from the offsets is compared to the oldest available archive log in Oracle. If the archive log begins with a system change number that comes after this scn value, this error will occur.

    Long-running transactions directly impact the low watermark or scn position. If a transaction remains active for longer than your archive log retention policy and the connector is restarted due to a re-balance or failure, this error can occur. If you suspect long-running transactions, you can configure the log.mining.transaction.retention.ms property in order to discard a transaction that lives longer than the specified value. While this does cause data loss as that transaction’s changes are discarded, it does allow the low watermark to safely advance forward at a reasonable pace even when long-running transactions occur. You should set the transaction retention period to a value less than your archive log retention period.

    Another case that can raise this error is capturing changes from an Oracle database with a low volume of changes. In particular, if you are using an older version of Debezium where the LogMiner query applied database-level filters, or you’ve configured the new query filter mode to apply database-level filters, it’s possible that the connector may go for extended periods of time without observing a single change event. Since offset data only synchronizes when the connector sends an event to Kafka, low volumes of changes over a large window of time can mean those Kafka offsets become stale, and if a restart occurs, this error could happen. In this case, configuring both the heartbeat.interval.ms and heartbeat.action.query connector properties is a great way to guarantee that there is some activity flowing to Kafka to keep those offsets from becoming stale.
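    A sketch combining these heartbeat properties with the transaction retention setting mentioned above is shown below. The heartbeat table, interval, and retention values are assumptions for illustration only; the retention value must stay below your archive log retention window, and the heartbeat query must target a table the connector user is allowed to write to:

    {
       "log.mining.transaction.retention.ms": "86400000",
       "heartbeat.interval.ms": "30000",
       "heartbeat.action.query": "INSERT INTO debezium_heartbeat (ts) VALUES (SYSTIMESTAMP)",
       ...
    }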

    ORA-01555: snapshot too old

    This specific error is most commonly observed during the connector’s initial snapshot. The Oracle connector relies on what is called flashback queries during the initial snapshot phase.

    A flashback query is a standard SELECT statement that uses a system change number to generate a result set based on the state of the data at that given point in the database’s lifetime. This can be useful for a variety of reasons, including being able to restore objects without the need for media recovery, because Oracle is capable of retaining that previous state for a certain period of time. The data returned by these queries relies on the Automatic Undo Management (AUM) subsystem and the undo data area, where transactions are recorded and retained for a period of time configurable via the database parameter UNDO_RETENTION.
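    For illustration, a flashback query against a hypothetical customers table at an arbitrary SCN looks like the following:

    -- Read the table contents as they existed at SCN 12345678 (table name and SCN are illustrative)
    SELECT * FROM customers AS OF SCN 12345678;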

    If the SCN used for the flashback query becomes too old and the undo retention no longer maintains historical data for that system change number, Oracle will report an ORA-01555 error that the snapshot is now too old. When this happens during your initial snapshot, the snapshot will need to be retaken from the beginning and unless you reconfigure Oracle’s undo retention period to allow for a longer retention time, rerunning the snapshot on the same data set will result in the same outcome.

    So either a) have your DBA increase the UNDO_RETENTION database parameter temporarily or b) use a schema-only snapshot and then rely on incremental snapshots to generate the initial data set from your existing table data.
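    For option a), a hedged sketch of inspecting and temporarily raising the undo retention is shown below; the 24-hour value is purely illustrative:

    -- Check the current undo retention (the value is in seconds)
    SELECT value FROM v$parameter WHERE name = 'undo_retention';

    -- Temporarily raise it, for example to 24 hours, while the initial snapshot runs
    ALTER SYSTEM SET undo_retention = 86400;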

    Streaming changes takes several minutes to appear

    Occasionally users will notice latency when the connector first starts or at specific periods during the connector’s lifetime. One of the best ways to identify what is going on is to coordinate with your DBA and take a close look at your database’s alert log, which records all the LogMiner and XStream interactions that Debezium performs with your database. Most often, though, this latency is seen by users of the default log mining strategy.

    The default mining strategy that we covered earlier performs what is called a data dictionary build step, and depending on your database, this can take some time to be written to your redo logs and then parsed by the LogMiner process. It’s not uncommon for this to take from 30 seconds up to several minutes to complete, and when using the default mining strategy, this process occurs on each log switch.

    So if you experience this latency frequently, we normally suggest that you check the frequency of your log switches. If your database is performing excessive log switches within a small window, outside Oracle’s guidelines, your DBA may need to tune the database accordingly. Reducing the frequency of log switches increases the time that Debezium can reuse the same log file for mining and therefore reduces the need to build and parse the data dictionary.
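    One way to gauge the log switch frequency is to count switches per hour from the V$LOG_HISTORY view, assuming you have read access to it:

    -- Count redo log switches per hour over the retained log history
    SELECT TO_CHAR(first_time, 'YYYY-MM-DD HH24') AS hour, COUNT(*) AS switches
    FROM   v$log_history
    GROUP  BY TO_CHAR(first_time, 'YYYY-MM-DD HH24')
    ORDER  BY hour;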

    If your table schema does not change often or won’t change at all, you can reconfigure the connector to use the online_catalog mining strategy as an alternative. This avoids writing the data dictionary to the redo logs and the parse phase performed by LogMiner, greatly increasing the speed at which a mining session begins, both at connector start-up and at each log switch.

    How do you know if an archive log can be deleted?

    Database administrators typically keep archive logs on the database server for a short period of time before they’re removed. This interval varies and depends on a number of factors, including how frequently the logs are created, their respective sizes, and the physical space available on the server. What is most important is that if Debezium requires a specific archive log, it remains available until it’s no longer needed for CDC.

    The easiest way to determine which logs are needed is via JMX metrics, looking at the field OffsetScn. This field references the system change number from which the connector will resume in case of a restart, and so any archive or redo log that contains this system change number, or that comes after it, must remain available.

    Debezium 2.4 will add another JMX metric that provides the cut-off timestamp for archive logs based on this OffsetScn. This means that you will be able to use this timestamp directly in shell scripts, comparing the filesystem’s timestamp with the one from JMX metrics and safely knowing which logs must be retained and which can be removed via RMAN.

    Memory footprint, how do you manage it efficiently?

    Due to the nature of how transaction data is written to the Oracle archive and redo logs, a buffer of the transaction state must be managed by the connector. Under ideal circumstances, this buffer maintains short-lived data: a transaction starts, we buffer its relevant changes, and once we observe the rollback or commit, the buffered data is handled and the buffer is cleared.

    Because the connector buffers transactions, it’s extremely important that you have some prior knowledge of your environment’s transaction patterns. If this information can vary and cannot be predicted, you may want to consider using an alternative buffer type to the default heap (memory) based buffer, as the heap buffer can easily lead to OutOfMemory exceptions in these circumstances when memory is configured too low.

    Please refer to the documentation about Event Buffering. The Oracle connector offers two Infinispan-based solutions that allow the connector to store the buffer off-heap, reducing the connector’s memory footprint and making it capable of dealing with very large transactions seamlessly.
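    As a sketch, and assuming the buffer type values described in that documentation, switching to the embedded Infinispan buffer is a matter of changing the buffer type in the connector configuration (additional Infinispan cache configuration properties are omitted here):

    {
       "log.mining.buffer.type": "infinispan_embedded",
       ...
    }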

    Lastly, we have composed a collection of frequently asked questions in the documentation. We generally do our best to keep the most commonly asked questions there as a reference. Please take a moment to read through those, and if you suspect anything may be missing, please open a Jira issue.

    Wrapping up

    I really hope this series on the Oracle connector has been helpful and informative. We’ve covered topics ranging from installation, configuration, and deployment of the connector to performance optimizations and how to debug or evaluate specific common issues we hear about from the community.

    As I mentioned earlier, Oracle is unlike most other database platforms and requires a bit more care and precision to maximize its potential. When evaluating the Oracle connector, it is important that you coordinate with an Oracle database administrator to make sure that you’re maximizing the connector’s potential, particularly if performance is a critical metric in your evaluation.

    As always, if you have questions about anything related to the content in this post or about something you observe in your environment, the team will do their best to provide you with the answers you need either by using our mailing list or reaching out to us on our chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    diff --git a/blog/2023/07/10/custom-http-signaling-notification/index.html b/blog/2023/07/10/custom-http-signaling-notification/index.html
    1 The signal.enabled.channels property specifies the signal channel to be used by the connector. In this case, the connector uses the http signal channel.
    2 The notification.enabled.channels property specifies the notification channel to be used by the connector. In this case, the connector uses the http notification channel.

    Now that we have the connector configuration file prepared, we can proceed to register the connector with Kafka Connect by executing the following command:

    curl -i -X POST -H "Accept:application/json" \
         -H  "Content-Type:application/json" http://localhost:8083/connectors/ \
         -d @register-postgres.json

    Once the connector is successfully registered, you can review the connector logs to observe the signal events. The logs provide insights into the processing and progress of the connector, including any signal-related information. You will encounter log messages similar to the following:

    Recorded signal event 'SignalRecord{id='924e3ff8-2245-43ca-ba77-2af9af02fa07', type='log', data='{"message":"Signal message received from http endpoint."}', additionalData={}}'    [io.debezium.examples.signal.HttpSignalChannel]

    Additionally, you might notice log messages related to notification events being sent to the Postbin. For example:

    [HTTP NOTIFICATION SERVICE] Sending notification to http channel   [io.debezium.examples.notification.HttpNotificationChannel]
    Bin created: {"binId":"1688742588469-1816775151528","now":1688742588470,"expires":1688744388470}   [io.debezium.examples.notification.HttpNotificationChannel]

    This log message provides information about the notification event, such as the creation of a bin with a unique identifier (binId) and other relevant details. To retrieve the notification event from Postbin, take the binId from the log message and use it to request the corresponding notification event. You can view the notification event in Postbin at the following URL: https://www.toptal.com/developers/postbin/b/:binId, replacing :binId with the actual binId obtained from the connector logs.
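    For example, using the binId from the log output above, fetching the bin page could look like the following; the URL is simply the one given above with the placeholder substituted:

    curl -s https://www.toptal.com/developers/postbin/b/1688742588469-1816775151528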

    The notification event sent to Postbin looks like the following:

    Postbin Preview

    Conclusion

    In this tutorial, we explored how to create custom signal and notification channels for Debezium connectors. We created a custom signal channel that receives signal events from an HTTP endpoint. We also created a custom notification channel that sends notification events to an HTTP endpoint.

    Debezium’s comprehensive signaling and notification system offers seamless integration with third-party solutions, allowing users to stay informed about the state and progress of Debezium connectors. The system’s extensibility empowers users to customize both the signals and the notification channels to fit their specific needs.

    Stay tuned for Part 3 of this series, where we will explore JMX signaling and notifications. In the meantime, you can check out the Debezium documentation for more information about signal and notification channels.

    If you have any questions or feedback, please feel free to reach out to us on the Debezium mailing list or the #community-general channel on the Zulip chat. We would love to hear from you!

    Anisha Mohanty

    Anisha is a Software Engineer at Red Hat. Currently working with the Debezium Team. She lives in Bangalore, India.

       



    Debezium 2.4.0.Alpha1 Released

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of Debezium 2.4 series, 2.4.0.Alpha1.

    This release includes a plethora of changes, 59 to be exact, covering a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth.

    Breaking changes

    MongoDB

    Previously, the MongoDB connector explicitly preferred to use a secondary node in specific scenarios. This explicit preference created problems for users who wanted to connect to the primary node. Thanks to recent changes (DBZ-6521), this is no longer the case and the read preference from the connection string is used instead.
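    For example, the desired read preference can now be expressed directly in the connection string passed to the connector; the host names and replica set below are illustrative:

    mongodb.connection.string=mongodb://mongo1:27017,mongo2:27017/?replicaSet=rs0&readPreference=secondaryPreferred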

    Vitess

    The Vitess connector’s change event structure has been slightly adjusted thanks to changes (DBZ-6617). The change event’s source information block now includes a new field that identifies the shard the event originated from.

    New Features

    Offset editor example

    Users often express the need to manipulate connector offsets for various reasons. This can be quite difficult for those who are not familiar with Kafka’s CLI tools, or with Java when using Debezium Server. Thanks to a contribution (DBZ-6338) by Nathan Smit, you can now use an editor to manipulate the offsets from the command line or a web-based interface.

    Head to our examples repository and follow the README.md to get started.

    Error handling

    Some Debezium connectors previously supported a connector property, errors.max.retries. This property controlled how many times a connector failure would be wrapped in a RetriableException and retried before the connector threw the raw exception up to the runtime. While this may sound similar to Kafka Connect’s errors.retry.timeout, this gave users a common way to deal with retries across multiple Debezium runtimes, including Kafka Connect, Debezium Server, and Debezium Embedded.

    With this release, DBZ-6573 unifies this behavior, making errors.max.retries available to all connectors.
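    As a simple sketch, a connector that should retry a failure a bounded number of times before surfacing the raw exception could set the following in its configuration; the value here is arbitrary:

    errors.max.retries=5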

    Notify initial snapshot progress

    Debezium’s new notification subsystem provides an easy way to integrate third-party tools and applications with Debezium to gain insight into the ongoing change data capture process, above and beyond the traditional JMX approach. In 2.4, the notification subsystem now includes the ability to notify you about the status of the ongoing initial snapshot (DBZ-6416).

    Initial snapshot notifications are emitted with an aggregateType of Initial Snapshot and contain a type field that exposes the current status of the snapshot. The possible values include: STARTED, ABORTED, PAUSED, RESUMED, IN_PROGRESS, TABLE_SCAN_COMPLETED, and COMPLETED.
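    As a rough, illustrative sketch only (the exact field names and serialization depend on the notification channel in use), such a notification might look similar to:

    {
      "id": "6d82a3ec-ba86-4b36-9168-7423b0dd5c1d",
      "aggregateType": "Initial Snapshot",
      "type": "IN_PROGRESS"
    }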

    MySQL improvements

    Thanks to a contribution provided by Harvey Yue (DBZ-6472), the MySQL connector will use parallelization to generate schema events during its snapshot phase. This should improve the overall performance when capturing the schema for many tables in your database. We plan to investigate how this can be extended to other relational connectors.

    MongoDB improvements

    The MongoDB connector continues to see lots of active development. This release introduces several new features specifically for MongoDB, which include:

    • Cluster-wide privileges are no longer necessary when watching a single database or collection (DBZ-6182).

    • Read preference taken from connection string (DBZ-6468, DBZ-6578).

    • Support authentication with TC MongoDB deployments (DBZ-6596).

    As we continue to make further improvements to the MongoDB connector, please let us know if there are still rough edges or enhancements that will help streamline its usage.

    Oracle improvements

    Debezium 2.4 supports several new Oracle data types, which include XML_TYPE and RAW (DBZ-3605). Two new Oracle dependencies were necessary to support XML: xdb and xmlparserv2. These dependencies are not redistributable, so they’re not included in the connector plugin archive by default, much like the connector’s driver. You must obtain these directly from Maven Central or Oracle, just like the driver dependency.

    In addition, XML works similarly to CLOB and BLOB data types; therefore, the connector must be configured with lob.enabled set to true to ingest XML changes. We’d love to hear your feedback on this new feature as it’s been requested for quite some time.
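    For example, capturing XML changes requires LOB support to be switched on in the connector configuration:

    lob.enabled=true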

    JDBC sink improvements

    Thanks to a contribution from Nicholas Fwang (DBZ-6595), the JDBC sink connector can now reference values from the change event’s source information block as a part of the connector’s configuration property table.name.format. If you want to reference such fields, simply use ${source.<field-name>} in the configuration, and the field’s value will be used.
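    A minimal sketch, assuming a relational source whose source information block exposes schema and table fields:

    table.name.format=${source.schema}_${source.table}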

    In addition, Roman Kudryashov also contributed the ability to resolve a row’s primary key from a header defined on the change event (DBZ-6602). To use this new feature, specify the connector configuration property primary.key.mode as record_header. If the header value is a primitive type, you will need to define a primary.key.fields configuration similar to how you would if the event’s record key was a primitive. If the header value is a struct type, all fields of the structure will be used by default, but specifying the primary.key.fields property allows you to choose a subset of fields from the header as the key.
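    For example, a sketch of the relevant sink configuration, where order_id is an illustrative header field name:

    primary.key.mode=record_header
    primary.key.fields=order_id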

    Spanner improvements

    Under certain conditions, it was possible for a Spanner connector to never advance from the START_INITIAL_SYNC state during initialization. After an investigation by Nancy Xu, a new configuration option was introduced to supply a configurable timeout. This can be done by adding the following to the connector’s configuration:

    connector.spanner.task.await.initialization.timeout=<timeout in milliseconds>

    Debezium UI metrics

    The Debezium UI project allows you to easily deploy any Debezium connector onto Kafka Connect using a web-based interface. This release has improved the interface by including several connector metrics (DBZ-5321) on the main connector listing view. We’d love your feedback on this change and welcome any suggestions on other metrics you may find useful.

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Mysql connector fails to parse statement FLUSH FIREWALL_RULES DBZ-3925

    • Add the API endpoint to expose running connector metrics DBZ-5359

    • Display critical connector metrics DBZ-5360

    • Snapshot result not saved if LAST record is filtered out DBZ-5464

    • Define and document schema history topic messages schema DBZ-5518

    • Align query.fetch.size across connectors DBZ-5676

    • Upgrade to Apache Kafka 3.5.0 DBZ-6047

    • Remove downstream related code from UI Frontend code DBZ-6394

    • Make Signal actions extensible DBZ-6417

    • CloudEventsConverter throws static error on Kafka Connect 3.5+ DBZ-6517

    • Dependency io.debezium:debezium-testing-testcontainers affects logback in tests DBZ-6525

    • Cleanup duplicate jobs from jenkins DBZ-6535

    • Implement sharded MongoDB ocp deployment and integration tests DBZ-6538

    • Batches with DELETE statement first will skip everything else DBZ-6576

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Refactor retry handling in Redis schema history DBZ-6594

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Support for custom tags in the connector metrics DBZ-6603

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • Build of Potgres connector fails when building against Kafka 2.X DBZ-6614

    • Upgrade postgresql driver to v42.6.0 DBZ-6619

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Upgrade Quarkus to 3.2.0.Final DBZ-6626

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • Upgrade kcctl to 1.0.0.Beta3 DBZ-6642

    • Cannot delete non-null interval value DBZ-6648

    • Upgrade gRPC to 1.56.1 DBZ-6649

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • CloudEvents converter is broken for JSON message deserialization DBZ-6654

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Junit conflicts cause by test-containers module using transitive Junit5 from quarkus DBZ-6659

    • Disable Kafka 2.x CRON trigger DBZ-6667

    What’s next?

    This initial release of Debezium 2.4 is already packed with lots of new features and the team is only getting started. Looking at our road map, we’ve already tackled nearly half of our plans for 2.4, but much still remains, including:

    • Single message transforms for TimescaleDB and Timestamps

    • OpenLogReplicator ingestion for Oracle

    • Ad-hoc blocking snapshots

    • Parallelization of Debezium Embedded

    • Parallel incremental snapshots for MongoDB

    • Further improvements to Debezium UI

    We intend to stick to our approximately two-week cadence, so expect Alpha2 at the start of August. Until then, please be sure to get in touch with us on the mailing list or our chat if you have any ideas or suggestions.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



    Debezium 2.3.1.Final Released

    DELETE FROM ...
    INSERT INTO ...
    INSERT INTO ...
    APPLY BATCH

    The issue (DBZ-6576) was that this particular batch was being interpreted incorrectly and, as a result, the two following INSERT operations were omitted from the connector’s event stream. Thanks to the work by the community, a patch was applied and included in Debezium 2.3.1.Final; the Debezium change event stream now contains the DELETE as well as all other events that follow.

    Other changes

    Debezium 2.3.1.Final also includes quite a number of bug fixes and stability improvements, see below:

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Refactor retry handling in Redis schema history DBZ-6594

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Cannot delete non-null interval value DBZ-6648

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • NotificationIT with Oracle xstream fails randomly DBZ-6672

    • Flaky Oracle test: shouldCaptureChangesForTransactionsAcrossSnapshotBoundaryWithoutReemittingDDLChanges DBZ-6673

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Outlook and what’s next?

    Debezium 2.3 will continue to receive maintenance updates throughout this quarter, with the next expected in early-to-mid August and likely additional follow-ups in late August and September, all depending on community feedback and any regressions.

    Presently, the team is actively working on Debezium 2.4 in parallel, and I expect to see Alpha2 in early August. We have lots of new features planned for Debezium 2.4, so I urge you to take a look at our road map for more details.

    I will be making a formal announcement next week about the new, upcoming Debezium community event. This will be hosted by the Debezium team to provide a space where the team, contributors, and community can openly collaborate. I’ll have more details next week, so be on the lookout for this!

    And finally, Current 2023 (aka Kafka Summit) is nearing. The event this year is being hosted in San Jose, California on September 26th and 27th. If you’re planning to attend, please drop me an email. I would enjoy an opportunity to talk with the community and gather your feedback about Debezium and how we can improve moving forward!

    Until then…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



    Debezium 2.3.2.Final Released

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes that address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend that you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    SQL Server refuses to start

    If you have recently tried to upgrade to Debezium 2.3.1.Final and use the SQL Server connector, you may have received an unusual error when starting the connector that said, "Configuration query.fetch.size is defined twice."

    Unfortunately, this error was not intended and there is no workaround to remedy the issue. Thankfully, Debezium 2.3.2.Final is here to the rescue; this release addresses the regression, allowing SQL Server connectors to start once again. If you are looking to upgrade and rely on the SQL Server connector, we strongly recommend that you skip the 2.3.1.Final build and instead move directly to 2.3.2.Final.

    Oracle default fetch size changed

    Debezium uses JDBC to communicate with the Oracle database. The Debezium connector for Oracle relies on two configuration properties, snapshot.fetch.size and query.fetch.size, to control how much data is returned for a query on each database "fetch" call.

    When these properties are configured too low, Debezium performs more network round trips to the database to read data, and that network latency can add up, particularly when working with large result sets. When they are configured too high, Debezium consumes more memory but reduces the network latency incurred by the fetch round trips. Ultimately, it’s important to strike a good balance based both on what your ideal fetch size may be and on the memory and hardware constraints of your environment.

    While discussing performance with one community member, we concluded that adjusting these values from their default of 2000 to 10000 increased the connector’s throughput quite substantially for their environment. So in this release, we felt it made sense to increase the default to 10000 to provide a better out-of-the-box experience for Oracle connector users.

    Now, these configuration properties are performance tuning knobs, and unfortunately there isn’t a guarantee that what works well for some environments is going to necessarily be universally good. Please take note of this change and if you experience any issues, you can always set the snapshot.fetch.size and query.fetch.size properties in your connector configuration, even setting them back to their previous default of 2000 if necessary.
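    For example, if the larger default causes memory pressure in your environment, the previous behavior can be restored explicitly in the connector configuration:

    snapshot.fetch.size=2000
    query.fetch.size=2000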

    Other changes

    Debezium 2.3.2.Final also includes quite a number of bug fixes and stability improvements, see below:

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • Upstream documentation missing types for configurations DBZ-6707

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Oracle fails to process a DROP USER DBZ-6716

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    Please refer to the release notes to learn more about all fixed bugs, update procedures, etc.

    Many thanks to the following individuals from the community who contributed to Debezium 2.3.2.Final: Bob Roldan, Chris Cranford, Indra Shukla, Jiri Pechanec, Jochen Schalanda, Robert Roldan, Vojtech Juranek, Yashashree Chopada, faihofu, j2gg0s, and paul cheung!

    Outlook and what’s next?

    A great deal of work has already gone into the new preview release of Debezium 2.4. We plan to do the next Alpha2 build in the middle of next week, which will include a plethora of new features and improvements. There is still time to share your feedback and suggestions if there are things you’d like to see in 2.4, so take a look at our road map and reach out on the mailing list or our chat.

    Finally, Debezium 2.3 will continue to receive maintenance updates. We’ll likely release 2.3.3.Final later in the month, depending on community feedback on regressions and bug fixes.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



    Debezium 2.4.0.Alpha2 Released

        "additional-condition": "last_update_date >= '2023-01-01'"
      }
    }
    The use of BLOCKING rather than INCREMENTAL differentiates the two ad-hoc snapshot modes.
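    For context, a complete blocking snapshot signal payload might look like the following sketch; the collection name is illustrative:

    {
      "type": "execute-snapshot",
      "data": {
        "data-collections": ["inventory.orders"],
        "type": "BLOCKING",
        "additional-condition": "last_update_date >= '2023-01-01'"
      }
    }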

    Source-to-sink column name propagation

    Normally column names map directly to field names and vice versa when consumed by sink connectors such as a JDBC connector. However, there are situations where the serialization technology, such as Avro, has very specific rules about field naming conventions. When a column’s name in a database table conflicts with the serialization rule’s naming conventions, Debezium will rename the field in the event so that it adheres to the serialization’s rules. This often means that a field will be prepended with underscores or invalid characters replaced with underscores.

    This can create problems for certain types of sinks, such as a JDBC sink connector, because the sink cannot easily deduce the original column name for the destination table, nor can it adequately map between the event’s field name and a column name if they differ. This typically means users must use transformation chains on the sink side to reconstruct the event’s fields with names that match the source.

    Debezium 2.4 introduces a way to minimize and potentially avoid that entirely by propagating the original column name as a field schema parameter, much in the same way that we do for data types, precision, scale, and length. The schema parameter __debezium.source.column.name now includes the original column name when column or data type propagation is enabled.
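    A minimal sketch of enabling column propagation for all captured columns; the regular expression is illustrative and can be narrowed to specific columns:

    column.propagate.source.type=.*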

    The Debezium JDBC sink connector already works with column and data type propagation, allowing for the sink connector to more accurately deduce column types, length, precision, and scale.

    With this new feature, the JDBC sink connector will automatically use the column name from this argument when it is provided to guarantee that the destination table will be created with the same column names as the source, even when using Avro or similar. This means no transformations are needed when using the Debezium JDBC sink connector.

    Alternative MySQL JDBC drivers

    In order to use IAM authentication on AWS, a special MySQL driver is required to provide that type of functionality. With Debezium 2.4, you can now provide a reference to this specific driver and the connector will use that driver instead of the default driver shipped with the connector.

    As an example, to connect using IAM authentication on AWS, the following configuration is needed:

    database.jdbc.driver=software.aws.rds.jdbc.mysql.Driver
    database.jdbc.protocol=jdbc:mysql:aws

    The database.jdbc.driver specifies the driver that should be loaded by the connector and used to communicate with the MySQL database. The database.jdbc.protocol is a supplemental configuration property that may not be required in all contexts. It defaults to jdbc:mysql but since AWS requires jdbc:mysql:aws, this allows you to specify this derivative within the configuration.

    We’d love to hear your feedback on whether something like this might be useful for other scenarios.

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Switch tracing to OpenTelemetry DBZ-2862

    • Connector drop down causes a scroll bar DBZ-5421

    • Provide outline for drawer component showing connector details DBZ-5831

    • Modify scroll for the running connector component DBZ-5832

    • Connector restart regression DBZ-6213

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Document Optimal MongoDB Oplog Config for Resiliency DBZ-6455

    • JDBC Schema History: When the table name is passed as dbName.tableName, the connector does not start DBZ-6484

    • Update the Edit connector UI to incorporate the feedback received from team in demo DBZ-6514

    • Support blocking ad-hoc snapshots DBZ-6566

    • Add new parameters to RabbitMQ consumer DBZ-6581

    • Document read preference changes in 2.4 DBZ-6591

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Received an unexpected message type that does not have an 'after' Debezium block DBZ-6637

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Send tombstone events when partition queries are finished DBZ-6658

    • Snapshot will not capture data when signal.data.collection is present without table.include.list DBZ-6669

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • Propagate source column name and allow sink to use it DBZ-6684

    • Partition duplication after rebalances with single leader task DBZ-6685

    • JDBC Sink Connector Fails on Loading Flat Data Containing Struct Type Fields from Kafka DBZ-6686

    • SQLSyntaxErrorException using Debezium JDBC Sink connector DBZ-6687

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • CDC - Debezium x RabbitMQ - Debezium Server crashes when an UPDATE/DELETE on source database (PostgreSQL) DBZ-6691

    • Missing operationTime field on ping command when executed against Atlas DBZ-6700

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Disable jdk-outreach-workflow.yml in forked personal repo DBZ-6702

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    • Refactor EmbeddedEngine::run method DBZ-6715

    • Oracle fails to process a DROP USER DBZ-6716

    • Support alternative JDBC drivers in MySQL connector DBZ-6727

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • Add STOPPED and RESTARTING connector states to testing library DBZ-6734

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • Update Quarkus to 3.2.3.Final DBZ-6740

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    • SingleProcessor remove redundant filter logic DBZ-6745

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    • Add a new parameter for selecting the db index when using Redis Storage DBZ-6759

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • Table schemas should be updated for each shard individually DBZ-6775

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Oracle XML column types are not properly resolved when adding XMLTYPE column during streaming DBZ-6782

    • Bump the MySQL binlog client version to 0.28.1 which includes significant GTID event performance improvements DBZ-6783

    • Add new Redis Sink connector parameter description to the documentation DBZ-6784

    • Upgrade Kafka to 3.5.1 DBZ-6785

    What’s next?

    The Debezium 2.4 series is already packed with lots of new features, and we’re only scratching the surface. We have more in store, including the new Oracle OpenLogReplicator adapter coming with Debezium 2.4 Alpha3 next week. After that, we’ll begin to wind down development and shift our focus to the beta and release candidate cycle, targeting the end of September for the Debezium 2.4 final release.

    Don’t forget about the Debezium Community Event, which I shared with you on the mailing list. If you have any ideas or suggestions, I’d love your feedback. We will be making an announcement in the next two weeks about the date/time and agenda.

    Additionally, if you’re going to Current 2023 this year in San Jose, I’d love to meet up and discuss your experiences with Debezium. I’ll be there doing a talk on event-driven design with Debezium and Apicurio with my good friends Hans-Peter Grahsl and Carles Arnal. If you’re interested in more details, feel free to drop me a line in chat, on the mailing list or directly via email.

    As always, if you have any ideas or suggestions, you can also get in touch with us on the mailing list or our chat. Until next time, don’t be a stranger and stay cool out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve ours existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.

    database.jdbc.protocol=jdbc:mysql:aws

    The database.jdbc.driver property specifies the driver that should be loaded by the connector and used to communicate with the MySQL database. The database.jdbc.protocol property is a supplemental configuration option that may not be required in all contexts. It defaults to jdbc:mysql, but since AWS requires jdbc:mysql:aws, this property allows you to specify that variant in the configuration.
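
    For illustration, a configuration using an alternative driver might look like the following sketch; the driver class shown is an assumption for the AWS JDBC Driver for MySQL and should be checked against that driver's documentation:

    database.jdbc.driver=software.aws.rds.jdbc.mysql.Driver
    database.jdbc.protocol=jdbc:mysql:aws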

    We’d love to hear feedback on whether something like this might be useful for other scenarios.

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Switch tracing to OpenTelemetry DBZ-2862

    • Connector drop down causes a scroll bar DBZ-5421

    • Provide outline for drawer component showing connector details DBZ-5831

    • Modify scroll for the running connector component DBZ-5832

    • Connector restart regression DBZ-6213

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Document Optimal MongoDB Oplog Config for Resiliency DBZ-6455

    • JDBC Schema History: When the table name is passed as dbName.tableName, the connector does not start DBZ-6484

    • Update the Edit connector UI to incorporate the feedback received from team in demo DBZ-6514

    • Support blocking ad-hoc snapshots DBZ-6566

    • Add new parameters to RabbitMQ consumer DBZ-6581

    • Document read preference changes in 2.4 DBZ-6591

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Received an unexpected message type that does not have an 'after' Debezium block DBZ-6637

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Send tombstone events when partition queries are finished DBZ-6658

    • Snapshot will not capture data when signal.data.collection is present without table.include.list DBZ-6669

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • Propagate source column name and allow sink to use it DBZ-6684

    • Partition duplication after rebalances with single leader task DBZ-6685

    • JDBC Sink Connector Fails on Loading Flat Data Containing Struct Type Fields from Kafka DBZ-6686

    • SQLSyntaxErrorException using Debezium JDBC Sink connector DBZ-6687

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • CDC - Debezium x RabbitMQ - Debezium Server crashes when an UPDATE/DELETE on source database (PostgreSQL) DBZ-6691

    • Missing operationTime field on ping command when executed against Atlas DBZ-6700

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Disable jdk-outreach-workflow.yml in forked personal repo DBZ-6702

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    • Refactor EmbeddedEngine::run method DBZ-6715

    • Oracle fails to process a DROP USER DBZ-6716

    • Support alternative JDBC drivers in MySQL connector DBZ-6727

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • Add STOPPED and RESTARTING connector states to testing library DBZ-6734

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • Update Quarkus to 3.2.3.Final DBZ-6740

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    • SingleProcessor remove redundant filter logic DBZ-6745

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    • Add a new parameter for selecting the db index when using Redis Storage DBZ-6759

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • Table schemas should be updated for each shard individually DBZ-6775

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Oracle XML column types are not properly resolved when adding XMLTYPE column during streaming DBZ-6782

    • Bump the MySQL binlog client version to 0.28.1 which includes significant GTID event performance improvements DBZ-6783

    • Add new Redis Sink connector parameter description to the documentation DBZ-6784

    • Upgrade Kafka to 3.5.1 DBZ-6785

    What’s next?

    The Debezium 2.4 series is already packed with lots of new features, and we’re only scratching the surface. We have more in store, including the new Oracle OpenLogReplicator adapter coming with Debezium 2.4 Alpha3 next week. After that, we’ll begin to wind down development and shift our focus to the beta and release candidate cycle, targeting the end of September for a Debezium 2.4 final release.

    Don’t forget about the Debezium Community Event, which I shared with you on the mailing list. If you have any ideas or suggestions, I’d love your feedback. We will be making an announcement in the next two weeks about the date/time and agenda.

    Additionally, if you’re going to Current 2023 this year in San Jose, I’d love to meet up and discuss your experiences with Debezium. I’ll be there doing a talk on event-driven design with Debezium and Apicurio with my good friends Hans-Peter Grahsl and Carles Arnal. If you’re interested in more details, feel free to drop me a line in chat, on the mailing list or directly via email.

    As always, if you have any ideas or suggestions, you can also get in touch with us on the mailing list or our chat. Until next time, don’t be a stranger and stay cool out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    Debezium 2.4.0.Beta1 Released

    With Debezium 2.4, if you are using the Infinispan-embedded buffer, you can now safely configure the overall embedded global configuration for Infinispan, which can allow you to tune and improve the overall performance when using the embedded Infinispan engine.
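
    As a rough sketch of what this can look like in the connector configuration (the name of the property that carries the global configuration is an assumption on my part and should be verified against the Oracle connector documentation; only the buffer-related properties are shown):

    log.mining.buffer.type=infinispan_embedded
    # Assumed property name for the global configuration; the value is the Infinispan
    # global XML (for example, thread pool settings) supplied inline.
    log.mining.buffer.infinispan.cache.global=<infinispan><threads>...</threads></infinispan>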

    SQL Server heartbeat improvements

    It’s not uncommon for a database to go for a period of time without any relevant changes, whether due to inactivity or because the changes that do occur are of no interest to the connector based on its configuration. In these cases, it’s critical that the offset metadata managed by the connector remains synchronized with the offset backing store during these periods so that a restart of the connector works as expected.

    With Debezium 2.4, if a SQL Server change capture loop does not find any changes or the changes that did occur are not of any relevance to the connector, the connector will continue to emit heartbeat events when enabled. This should improve the reliability of the offsets stored in the offset backing store across a variety of use cases.
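
    Note that heartbeats are only emitted when a heartbeat interval is configured on the connector; a minimal sketch (the interval value here is arbitrary):

    heartbeat.interval.ms=10000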

    Vitess shardless naming strategy

    Debezium 2.4.0.Alpha2 introduced a mechanism to handle schema changes per shard by using the shard name as the catalog when identifying the relational identifier for a table. When using the DefaultTopicNamingStrategy, this had the side effect that the shard would be included within the topic name, which may not be desirable.

    Debezium 2.4.0.Beta1 introduces a new strategy, TableTopicNamingStrategy, that restores the old behavior.

    The following table shows the output differences for topic names based on the different strategies:

    Strategy                       Topic Output

    DefaultTopicNamingStrategy     <topic.prefix>.<shard>.<table-name>

    TableTopicNamingStrategy       <topic.prefix>.<table-name>

    In order to configure the table topic naming strategy, include the following configuration for the connector:

    topic.naming.strategy=io.debezium.connector.vitess.TableTopicNamingStrategy

    JDBC sink SQL Server identity inserts

    Each database handles the insertion of values into an identity-based column differently. With SQL Server, this requires explicitly enabling IDENTITY_INSERT prior to the insert and disabling it again afterward. With Debezium 2.4, the Debezium JDBC sink connector supports this when SQL Server is the target database.

    In order to take advantage of identity-based inserts, the JDBC sink connector must be configured with a new dialect-based property called dialect.sqlserver.identity.inserts, which can be set to true or false. By default, this feature is set to false and must be enabled if you wish to insert into identity-based columns.
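
    For example, enabling the behavior in the sink connector configuration looks like this (only the relevant property is shown):

    dialect.sqlserver.identity.inserts=true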

    When enabled, all insert and upsert operations will be wrapped as follows:

    SET IDENTITY_INSERT <table-name> ON;
    <the insert or upsert statement>
    SET IDENTITY_INSERT <table-name> OFF;

    Other fixes & improvements

    There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • Debezium heartbeat.action.query does not start before writing to WAL DBZ-6635

    • Schema name changed with Custom topic naming strategy DBZ-6641

    • Wrong behavior of quote.identifiers in JdbcSinkConnector DBZ-6682

    • Toasted UUID array is not properly processed DBZ-6720

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • Blocking snapshot must take snapshot configurations from signal DBZ-6731

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Mongodb connector tests are massively failing when executed on 7.0-rc version DBZ-6779

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Mysql connector tests are failing when executed without any profile DBZ-6791

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Fix bug with getSnapshottingTask DBZ-6820

    • Dbz crashes on DDL statement (non-Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo fails randomly DBZ-6839

    • Wrong filtered comments DBZ-6840

    • Intermittent test failure: BaseSourceTaskTest.verifyTaskRestartsSuccessfully DBZ-6841

    • When using skip.messages.without.change=true a WARN log message is reported for each record DBZ-6843

    Outlook & What’s Next?

    As we enter the beta phase of Debezium 2.4, the next several weeks will primarily focus on bugfixes and stability as we continue to march toward a final release at the end of September. We are also close to completing the last-minute changes for the OpenLogReplicator ingestion method for Oracle; once those are done, expect a Beta2 shortly afterward. Furthermore, there will be a Debezium 2.3.3.Final maintenance release early next week and likely at least one more 2.3 release as we make the transition to Debezium 2.4 as the new stable release later this month.

    In addition, the Debezium Community Event’s agenda and date will be published later this week, so keep an eye out for that news. And finally, we’ll be presenting at Kafka Summit 2023 (aka Current 2023) later this month. If you’re planning to attend and would like to ask the experts, be sure to get in touch with me or anyone on the team so we can plan to meet up and discuss anything related to Debezium and CDC.

    As always, if you have any ideas or suggestions, you can also get in touch with us on the mailing list or our chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    Debezium 2.3.3.Final Released

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes that address regressions, improve stability, and update documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    Breaking changes

    There is one breaking change in this release, where the behavior of Debezium 2.3.3.Final differs from the previous release, Debezium 2.3.2.Final. If you are upgrading from Debezium 2.3.2.Final or any prior version, please review the following breaking change for details.

    Oracle connector SCN-based metrics

    The Oracle connector tracks a variety of system change number (SCN) values in its JMX metrics, including OffsetScn, CurrentScn, OldestScn, and CommittedScn. These SCN values are numeric and can exceed the upper bound of a Long data type, so Debezium has traditionally exposed them as Strings.

    Unfortunately, tooling such as Grafana and Prometheus does not work with String-based values, and the community has asked on several occasions to be able to view these values from metrics-gathering frameworks. With Debezium 2.3.3.Final or later, there is a small behavior change in these JMX metrics: they are no longer exposed as String values but as BigInteger values.

    This change in behavior allows tooling such as Grafana and Prometheus to now scrape these values from the JMX beans automatically for reporting and observability stacks.

    If you were previously gathering these values for other purposes, be aware they’re no longer string-based and should be interpreted as BigInteger numerical values moving forward.
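
    If you read these metrics programmatically rather than through Grafana or Prometheus, the attributes now come back as BigInteger. A minimal sketch using the standard JMX API, assuming the connector runs in the same JVM; the ObjectName pattern is an assumption for illustration and must be adjusted to your deployment:

    import java.lang.management.ManagementFactory;
    import java.math.BigInteger;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ScnMetricsExample {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Assumed MBean name pattern; replace "myserver" with your connector's topic prefix.
            ObjectName name = new ObjectName(
                    "debezium.oracle:type=connector-metrics,context=streaming,server=myserver");
            // Prior to 2.3.3.Final these attributes were Strings; they are now BigInteger values.
            BigInteger currentScn = (BigInteger) server.getAttribute(name, "CurrentScn");
            BigInteger offsetScn = (BigInteger) server.getAttribute(name, "OffsetScn");
            System.out.println("CurrentScn=" + currentScn + ", OffsetScn=" + offsetScn);
        }
    }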

    Other fixes & improvements

    There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • Debezium 2.3.0.Final Missing Kafka Channel Documentation DBZ-6688

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • ExtractNewDocumentState for MongoDB ignore previous document state when handling delete event’s with REWRITE DBZ-6725

    • Missing or misspelled IDs result in downstream build errors DBZ-6754

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Dbz crashes on DDL statement (non Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • Make partial and multi-response transactions debug level logs DBZ-6830

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    Outlook & What’s Next?

    As the team continues active development of Debezium 2.4, I would expect another maintenance release of Debezium 2.3 later this month, barring any reported regressions or bugs.

    In addition, the Debezium Community Event’s agenda and date will be published this week, so keep an eye out for that news. And finally, we’ll be presenting at Kafka Summit 2023 (aka Current 2023) later this upcoming month. If you’re planning to attend and would like to ask the experts, be sure to get in touch with me or anyone on the team and we can plan to meet up and discuss anything related to Debezium and CDC.

    As always, if you have any ideas or suggestions, you can also get in touch with us on the mailing list or our chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    Enable pass-through of additional properties in Debezium UI

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

    Need for custom properties step

    Debezium is very flexible - each connector can be configured and fine-tuned in various ways. Debezium UI already exposes an extensive list of properties that are segregated under different steps for configuration. However, despite the UI exposing a wide range of properties, there are situations where additional custom properties may be necessary beyond those exposed out of the box. For example, producer.*, consumer.*, and even driver.* properties are pass-through configuration options for configuring parts of the Kafka Connect pipeline or the JDBC driver, respectively.

    Example

    Debezium must configure the Kafka producer settings to write schema change events to the schema history topic for connectors that utilize a schema history topic, like MySQL. The connector configuration includes a subset of options that act as pass-through properties that begin with the schema.history.internal.producer.* prefix.

    Debezium strips the prefix from the property names before passing the properties to the Kafka client.
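
    For instance, securing the producer that writes to the schema history topic could look like the following; the SSL settings are standard Kafka client properties chosen purely for illustration:

    schema.history.internal.producer.security.protocol=SSL
    schema.history.internal.producer.ssl.keystore.location=/var/private/ssl/kafka.keystore.jks
    schema.history.internal.producer.ssl.keystore.password=changeit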

    Custom properties step

    The Debezium UI allows you to configure additional properties for connectors not exposed in the UI. For this, we have created a new extra step called Custom Properties in the configuration wizard.

    The custom properties step allows you to provide the additional properties by entering a key-value pair. Once you enter the additional configuration properties key and value, click the Apply button.

    When clicking the Apply button, the UI validates the user entries and provides feedback if there are any problems. If there are no problems, the UI will show a green check icon on the right side of all the successfully added additional properties. You can also remove any added property by clicking on the Remove button on the right side of the property.

    On the left-hand side, you can see the list of properties already configured in the previous wizard step; the custom properties newly added in this step are highlighted in blue.

    In the screenshot above, the user added schema.history.internal.producer.* related additional properties.

    Self-contained example

    You can try out configuring the connector with some custom properties (and more) with our self-contained example UI demo - which is included under debezium-examples on GitHub. The UI demo includes a Docker Compose file, which brings up several sources with data and the UI. Please refer to the README file for more details on running the Debezium UI demo.

    To learn more about the Debezium UI, please refer to the reference documentation.

    More coming soon!

    Stay tuned for further improvements and new feature announcements in the UI in the coming releases.

    A big thank you to the team who have contributed in many ways: Anisha Mohanty, René Kerner and Chris Cranford!

    Indra Raj Shukla

    Indra is a senior software developer at Red Hat. He has extensive experience in UI development. He lives in Bangalore, India.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    Debezium 2.4.0.Beta2 Released

    In addition, if the authentication needs to use another database besides admin, the connector configuration can also include the mongodb.authsource property to control what authentication database should be used.
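
    A brief sketch of what that might look like in the connector configuration; the connection string and database name are illustrative placeholders:

    {
       "mongodb.connection.string": "mongodb://mongo1:27017/?replicaSet=rs0",
       "mongodb.authsource": "users"
    }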

    For more information, please see the documentation.

    Configurable order of aggregation pipeline

    Debezium 2.4 now provides a way to control the aggregation order of the change streams pipeline. This can be critical when the documents being aggregated could lead to pipeline problems, for example very large documents.

    By default, the connector applies the MongoDB internal pipeline filters first and then any user-constructed filters; however, this can allow large documents into the pipeline, and MongoDB throws an error if a document exceeds the internal 16 MB limit. In such cases, the connector can now be configured to apply the user stages defined by cursor.pipeline to the pipeline first, filtering out those documents so that the pipeline does not fail due to the 16 MB limit.

    To accomplish this, simply apply the following configuration to the connector:

    {
       "cursor.pipeline.order": "user_first",
       "cursor.pipeline": "<custom-pipeline-filters>"
    }

    For more details, please see the documentation.

    MongoDB 7 support

    MongoDB 7.0 was released just last month and Debezium 2.4 ships with MongoDB 7 support.

    If you are looking to upgrade to MongoDB 7 for your environment, you can easily do so as Debezium 2.4+ is fully compatible with the newer version. If you encounter any problems, please let us know.

    Other fixes & improvements

    There are several bugfixes and stability changes in this release; some noteworthy ones are:

    • Documentation content section in the debezium.io scroll over to the top header. DBZ-5942

    • Only publish deltas instead of full snapshots to reduce size of sync event messages DBZ-6458

    • Postgres - Incremental snapshot fails on tables with an enum type in the primary key DBZ-6481

    • schema.history.internal.store.only.captured.databases.ddl flag not considered while snapshot schema to history topic DBZ-6712

    • ExtractNewDocumentState for MongoDB ignore previous document state when handling delete event’s with REWRITE DBZ-6725

    • MongoDB New Document State Extraction: original name overriding does not work DBZ-6773

    • Error with propagation source column name DBZ-6831

    • Support truncating large columns DBZ-6844

    • Always reset VStream grpc channel when max size is exceeded DBZ-6852

    • Kafka offset store fails with NPE DBZ-6853

    • JDBC Offset storage - configuration of table name does not work DBZ-6855

    • JDBC sink insert fails with Oracle target database due to semicolon DBZ-6857

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Tombstone events causes NPE on JDBC connector DBZ-6862

    • Debezium-MySQL not filtering AWS RDS internal events DBZ-6864

    • Avoid getting NPE when executing the arrived method in ExecuteSnapshot DBZ-6865

    • errors.max.retries = 0 Causes retrievable error to be ignored DBZ-6866

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    • ChangeStream aggregation pipeline fails on large documents which should be excluded DBZ-6871

    • Oracle alter table drop constraint fails when cascading index DBZ-6876

    Altogether, a total of 36 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Andy Pickler, Anisha Mohanty, Breno Moreira, Chris Cranford, Harvey Yue, Indra Shukla, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Nancy Xu, Nir Levy, Ondrej Babec, Thomas Thornton, and tison!

    Outlook & What’s Next?

    Debezium 2.4 is shaping up quite nicely with our second preview release, Beta2, which now includes OpenLogReplicator support. We intend to spend the remaining weeks before the 2.4 final release working on stability and any regressions that are identified. We encourage you to give Debezium 2.4.0.Beta2 a try. I anticipate a Beta3 next week to address any shortcomings with OpenLogReplicator, with the hope of a final release by the end of the month.

    Don’t forget about the Debezium Community Event, which I shared with you on the mailing list. The event will be held on Thursday, September 21st at 8:00am EDT (12:00pm UTC) where we’ll discuss Debezium 2.4 and the future. Details are available on the Zulip chat thread, so be sure to join if you are able, we’d love to see you there.

    Additionally, if you intend to participate at Current 2023 (formerly Kafka Summit) in San Jose, California, I will be there giving a presentation on Debezium and data pipelines on Wednesday afternoon with my good friend Carles Arnal. There will also be another presentation by my colleague Hans-Peter Grahsl on event-driven design you shouldn’t miss. If you’d like to meet up and have a quick chat about Debezium, your experiences, or even just to say "Hi", I’d love to chat. Please feel free to ping me on Zulip (@Chris Cranford) or send me a notification on Twitter (@crancran77).

    As always, if you have any ideas or suggestions, you can also get in touch with us on the mailing list or our chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    Debezium 2.4.0.CR1 Released

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1.

    The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall we?!

    Breaking changes

    The community-led Vitess connector previously retried only a subset of errors by default. This behavior has changed: now only explicitly defined errors are excluded from retries. For more details, please see DBZ-6944.

    New Features

    MongoDB parallel incremental snapshots

    Since the introduction of incremental snapshots back in Debezium 1.x, the process of incrementally snapshotting existing data while concurrently capturing changes from the database has been a single-threaded activity. It’s not uncommon when adding new features to focus on the basics and build upon that foundation, which is precisely what has happened with MongoDB.

In Debezium 2.4, we are taking the first steps toward adding parallel support to incremental snapshots with the MongoDB connector by reading multiple chunks in parallel. This should allow faster throughput at the cost of additional memory while the chunks are collected, sorted, and deduplicated against the transaction log capture data set. Thanks to Yue Wang for starting this effort in DBZ-6518; it’s most definitely something we are looking to explore for the relational connectors in an upcoming Debezium release.
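The way an incremental snapshot is requested does not change with this work; the parallel chunk reading only affects how quickly chunks are processed and how much memory they occupy in flight. A rough, illustrative sketch of the MongoDB connector settings involved is shown below in properties form; the signalling collection name is made up, and the chunk size is just an example value.

# Illustrative MongoDB connector excerpt (not a complete configuration).
# Collection used to receive execute-snapshot signals; "signals.debeziumSignals" is a placeholder name.
signal.data.collection=signals.debeziumSignals
# Larger chunks trade additional memory for throughput while chunks are collected, sorted, and deduplicated.
incremental.snapshot.chunk.size=2048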

    PostgreSQL 16 support

PostgreSQL announced the release of PostgreSQL 16 just over a week ago, and we’re pleased to announce that Debezium 2.4 will support that release.

PostgreSQL 16 introduces logical replication from standby servers; however, this feature has not yet been tested by Debezium and will be introduced in a later build of Debezium. For now, logical replication is supported only via the primary.

    Google Spanner GKE workload identity support

Google Kubernetes Engine (GKE) supports Workload Identity, allowing you to use a more secure authentication mechanism than the traditional JSON-based keys. In Debezium 2.4, when no JSON key is explicitly set, the Spanner connector will now automatically default to GKE Workload Identity authentication. Thanks to laughingman7743 for this effort as a part of DBZ-6885.

    Other Fixes

    • Ad-hoc blocking snaps trigger emits schema changes of all tables DBZ-6828

    • When the start_scn corresponding to the existence of a transaction in V$TRANSACTION is 0, log mining starts from the oldest scn when the oracle connector is started for the first time DBZ-6869

    • Ensure that the connector can handle rebalance events robustly DBZ-6870

    • OpenLogReplicator confirmation can resend or omit events on restarts DBZ-6895

    • ExtractNewRecordState’s schema cache is not updated with arrival of the ddl change event DBZ-6901

    • Misleading Debezium error message when RDI port is not specified in application.properties DBZ-6902

• Generating protobuf files to target/generated-sources breaks build DBZ-6903

    • Clean log printout in Redis Debezium Sink DBZ-6908

    • Values being omitted from list of JSON object DBZ-6910

    • fix logger named DBZ-6935

    • MySql connector get NPE when snapshot.mode is set to never and signal data collection configured DBZ-6937

    • Sanity check / retry for redo logs does not work per Oracle RAC thread DBZ-6938

    • Drop events has wrong table changes information DBZ-6945

    • Remove spaces from Signal and Notification MBean’s ObjectName DBZ-6957

    Altogether, 20 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Andy Pickler, Anisha Mohanty, Breno Moreira, Chris Cranford, Harvey Yue, Indra Shukla, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Nancy Xu, Nir Levy, Ondrej Babec, René Kerner, Sergey Eizner, Thomas Thornton, Wu Zhenhua, Zheng Wang, laughingman7743, and tison!

    Outlook and What’s next?

    We’re now at the junction where we begin to set our vision on Debezium 2.5 and what lies ahead. We recently held our first Community Meeting and discussed a number of our 2.5 roadmap ideas, some of which include:

    • Parallel incremental snapshots for relational connectors.

    • Improved MongoDB support for BSONDocument exceeding 16MB.

    • Db2 support on z/OS and iSeries platforms.

    • Batch support in the JDBC sink connector.

    • Parallelization of tasks and other Debezium Engine internals.

    • Preview of MariaDB and Oracle 23 support

For more details around Debezium 2.5 and beyond, please check out our roadmap.

Additionally, Debezium will be at Current 2023 next week. If you are attending, be sure to stop by the Ask-The-Experts session on Wednesday at 2:30PM to catch a session on Debezium and Kafka. Also be sure to check out the sponsored session on Wednesday at 4:30PM to find out just how easy it is to deploy data pipelines from the edge to the cloud using open-source projects such as Debezium, Strimzi, Apicurio, and Kubernetes.

    As always, if you have any questions, suggestions, or feedback, please reach out to us on our mailing list or chat. We always enjoy hearing what you have to share. Until next time, be safe.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/09/23/flink-spark-online-learning/index.html b/blog/2023/09/23/flink-spark-online-learning/index.html index 7f5c8d6e04..aff8887d1b 100644 --- a/blog/2023/09/23/flink-spark-online-learning/index.html +++ b/blog/2023/09/23/flink-spark-online-learning/index.html @@ -151,4 +151,4 @@ spark_1 | (1.0,1) spark_1 | (0.0,0) spark_1 | (1.0,1) -----

The prediction is the number of the cluster that the k-means algorithm created and has no relation to the labels in our data sample. That means that, e.g., (0.0,1) is not necessarily a wrong prediction. It can happen that a data point with label 0 was assigned to the correct cluster; however, Spark internally marked it as cluster number 1. This needs to be kept in mind when evaluating the model.

So, similarly to Flink, we get better results as we pass more training data, without the need to re-train and re-deploy the model. In this case, we get even better results than Flink’s model.

    Conclusions

    In this blog post, we continued exploring how Debezium can help make data ingestion into various ML frameworks seamless. We have shown how to pass the data from the database to Apache Flink and Apache Spark in real time as a stream of the data. The integration is easy to set up in both cases and works well. We demonstrated it in an example that allows us to use an online learning algorithm, namely the online k-means algorithm, to highlight the power of data streaming. Online machine learning allows us to make real-time predictions on the data stream and improve or adjust the model immediately as the new training data arrives. Model adjustment doesn’t require any model re-training on a separate compute cluster and re-deploying a new model, making ML-ops more straightforward and cost-effective.

As usual, we would appreciate any feedback on this blog post. Do you have any ideas on how Debezium or change data capture can be helpful in this area? What would be helpful to investigate, whether integration with another ML framework, integration with a specific ML feature store, or something else? If you have any input in this regard, don’t hesitate to reach out to us on the Zulip chat or the mailing list, or transform your ideas directly into Jira feature requests.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/10/03/debezium-2-4-final-released/index.html b/blog/2023/10/03/debezium-2-4-final-released/index.html index 57cc1dd928..3c0d43dba7 100644 --- a/blog/2023/10/03/debezium-2-4-final-released/index.html +++ b/blog/2023/10/03/debezium-2-4-final-released/index.html @@ -44,4 +44,4 @@ "openlogreplicator.host": "<host>", (2) "openlogreplicator.port": "<port>" (3)
    1 The source alias defined in the OpenLogReplicator.json configuration that is to be used.
    2 The host that is running the OpenLogReplicator.
    3 The port the OpenLogReplicator is listening on.

When the connector starts and begins to stream, it will connect to the OpenLogReplicator process’s network endpoint, negotiate the connection with the serialization process, and then begin to receive redo log entries.
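Putting the snippet and callouts above together, a minimal configuration sketch for the new ingestion method is shown below in properties form. This is illustrative only: the adapter value is an assumption based on the connector’s existing adapter switch, and the host and port are placeholders, so please verify the details against the connector documentation.

# Illustrative Oracle connector excerpt for the OpenLogReplicator ingestion method.
# Assumed value for the existing adapter switch; confirm against the documentation.
database.connection.adapter=olr
# Source alias defined in OpenLogReplicator.json (callout 1); placeholder value.
openlogreplicator.source=ORACLE
# Host and port the OpenLogReplicator process listens on (callouts 2 and 3); placeholder values.
openlogreplicator.host=olr.example.com
openlogreplicator.port=9000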

    We will have another blog post that goes over OpenLogReplicator in more detail in the coming weeks leading up to the final release, but in the meantime feel free to experiment with the new ingestion method as we would love to hear your feedback.

As this ingestion method is experimental, there are a few known limitations; please review the connector documentation for details.

    XML and RAW data types

Debezium 2.4 supports several new Oracle data types, including XML_TYPE and RAW (DBZ-3605). Two new Oracle dependencies were necessary to support XML: xdb and xmlparserv2. These dependencies are not redistributable, so they’re not included in the connector plugin archive by default, much like the connector’s driver. You must obtain these directly from Maven Central or Oracle, just like the driver dependency.

    In addition, XML works similarly to CLOB and BLOB data types; therefore, the connector must be configured with lob.enabled set to true to ingest XML changes. We’d love to hear your feedback on this new feature as it’s been requested for quite some time.
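As a quick illustration, the only configuration change needed on the connector side is the existing LOB switch, shown here as a properties-form excerpt:

# Illustrative Oracle connector excerpt; XML changes are only captured when LOB support is enabled.
lob.enabled=true
# Reminder: the non-redistributable xdb and xmlparserv2 jars must be placed in the connector's
# plugin directory alongside the Oracle JDBC driver.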

    SQL Server

    Heartbeat improvements

It’s not uncommon for a database to go for a period of time without any relevant changes, whether due to inactivity or because the changes that do occur are of no interest to the connector based on its configuration. In these cases, it’s critical that the offset metadata managed by the connector remains synchronized with the offset backing store during such periods so that a restart of the connector works as expected.

With Debezium 2.4, if a SQL Server change capture loop does not find any changes, or the changes that did occur are not relevant to the connector, the connector will continue to emit heartbeat events when heartbeats are enabled. This should improve the reliability of the offsets stored in the offset backing store across a variety of use cases.
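Heartbeats themselves are enabled the same way as before, via the heartbeat interval; a minimal SQL Server connector excerpt (illustrative value) follows:

# Illustrative SQL Server connector excerpt.
# A non-zero interval enables periodic heartbeat events; 0 disables them.
heartbeat.interval.ms=10000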

    JDBC

    Improved table naming strategy

Nicholas Fwang added the ability to reference values from the change event’s source information block in the connector’s configuration property table.name.format. If you want to reference such fields, simply use ${source.<field-name>} in the configuration, and the field’s value will be used (DBZ-6595).
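For example, assuming the source events carry schema and table fields in their source block (the exact field names vary by source connector), a destination table name could be derived as in this illustrative excerpt:

# Illustrative JDBC sink connector excerpt.
# Resolves the destination table from the event's source info block,
# e.g. schema "inventory" and table "customers" become "inventory_customers".
table.name.format=${source.schema}_${source.table}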

    Header-based primary keys

Roman Kudryashov contributed the ability to resolve a row’s primary key from a header defined on the change event. To use this new feature, specify the connector configuration property primary.key.mode as record_header. If the header value is a primitive type, you will need to define a primary.key.fields configuration, similar to how you would if the event’s record key were a primitive. If the header value is a struct type, all fields of the structure will be used by default, but specifying the primary.key.fields property allows you to choose a subset of fields from the header as the key (DBZ-6602).
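A short illustrative excerpt of the JDBC sink settings involved is shown below; the field name order_id is hypothetical and depends on how your pipeline populates the header.

# Illustrative JDBC sink connector excerpt.
# Resolve the row's primary key from a header on the change event.
primary.key.mode=record_header
# Required when the header value is a primitive; selects a subset of fields when it is a struct.
primary.key.fields=order_id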

    SQL Server identity inserts

    Each database handles the insertion of values into an identity-based column differently. With SQL Server, this requires the explicit enablement of IDENTITY_INSERT prior to the insert and the disabling of this feature afterward. With Debezium 2.4, the Debezium JDBC sink connector provides support for this in the target database.

    In order to take advantage of identity-based inserts, the JDBC sink connector must be configured with a new dialect-based property called dialect.sqlserver.identity.inserts, which can be set to true or false. By default, this feature is set to false and must be enabled if you wish to insert into identity-based columns.

    When enabled, all insert and upsert operations will be wrapped as follows:

    SET IDENTITY_INSERT <table-name> ON;
     <the insert or upsert statement>
SET IDENTITY_INSERT <table-name> OFF;
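Enabling the behavior is a single dialect property on the JDBC sink connector, as in this illustrative excerpt:

# Illustrative JDBC sink connector excerpt; the option is disabled by default.
# When enabled, inserts and upserts against SQL Server targets are wrapped with SET IDENTITY_INSERT ON/OFF.
dialect.sqlserver.identity.inserts=true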

    Spanner

    Await initialization task timeout

Under certain conditions, it was possible for a Spanner connector to not advance from the START_INITIAL_SYNC state during initialization. After an investigation by Nancy Xu, a new configuration option was introduced to supply a configurable timeout. This can be done by setting connector.spanner.task.await.initialization.timeout to the desired number of milliseconds.
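For example, to give the initialization step up to five minutes before timing out (illustrative value):

# Illustrative Spanner connector excerpt; the value is in milliseconds.
connector.spanner.task.await.initialization.timeout=300000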

    GKE workload identity support

Google Kubernetes Engine (GKE) supports Workload Identity, allowing you to use a more secure authentication mechanism than the traditional JSON-based keys. In Debezium 2.4, when no JSON key is explicitly set, the Spanner connector will now automatically default to GKE Workload Identity authentication. Thanks to laughingman7743 for this effort as a part of DBZ-6885.

    UI

    Connector Metrics

    The Debezium UI project allows you to easily deploy any Debezium connector onto Kafka Connect using a web-based interface. This release has improved the interface by including several connector metrics on the main connector listing view. We’d love your feedback on this change and welcome any suggestions on other metrics you may find useful (DBZ-5321).

    Examples

    Offset editor example

    Users often express the need to manipulate connector offsets for various reasons. This can often be very difficult for those who may not be familiar with Kafka’s CLI tools or Java if you use Debezium Server. Thanks to a contribution (DBZ-6338) by Nathan Smit, you can now use an editor to manipulate the offsets from the command line or a web-based interface.

    Head to our examples repository and follow the README.md to get started.

    Other changes

    Altogether, 15 issues were fixed in this release and a total of 231 issues across all the Debezium 2.4 releases.

    • Debezium Outbox not working with CloudEventsConverter DBZ-3642

    • Incremental snapshot data-collections are not deduplicated DBZ-6787

    • MongoDB connector no longer requires cluster-wide privileges DBZ-6888

    • Timezone Transformation can’t work DBZ-6940

    • MySQL Kafka Signalling documentation is incorrect DBZ-6941

    • Infinite loop when using OR condition in additional-condition DBZ-6956

    • Filter out specified DDL events logic is reverted DBZ-6966

    • DDL parser does not support NOCOPY keyword DBZ-6971

    • Decrease time spent in handling rebalance events DBZ-6974

    • ParsingException (MySQL/MariaDB): User specification with whitespace DBZ-6978

    • RecordsStreamProducerIT#shouldReceiveChangesForInfinityNumericWithInfinity fails on Postgres < 14 DBZ-6986

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo may fail as the schema may not exists DBZ-6987

    Outlook & What’s next?

Debezium 2.4 was a feature-packed milestone for the team, so after a few drinks and some celebration, the plan is to turn our focus toward what lies ahead for the 2.5 release in mid-December. We have already had our first Debezium community meeting and discussed our roadmap, and we’re more than eager to get started.

    If you have any ideas or suggestions for what you’d like to see included in Debezium 2.5, please provide that feedback on our mailing list or in our Zulip chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/10/05/Debezium-JMX-signaling-and-notifications/index.html b/blog/2023/10/05/Debezium-JMX-signaling-and-notifications/index.html index 309dbf8a46..ce5ed45aca 100644 --- a/blog/2023/10/05/Debezium-JMX-signaling-and-notifications/index.html +++ b/blog/2023/10/05/Debezium-JMX-signaling-and-notifications/index.html @@ -1223,4 +1223,4 @@ ], "timestamp": 1695652278, "status": 200 -}

You can see that we now also have the notification about the inventory.products table incremental snapshot that we sent through the REST API.

    Conclusion

    In this third installment of our series on Debezium Signaling and Notifications, we’ve learned how to enable and manage both signaling and notifications using JMX and Jolokia. Signaling lets you dynamically control Debezium’s behavior, while notifications keep you informed about critical events. By harnessing these capabilities along with Jolokia, you can effectively manage, monitor, and interact with your data streaming workflows, ensuring that you always control Debezium.

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/10/19/Debezium-Operator-Takes-off-to-the-Clouds/index.html b/blog/2023/10/19/Debezium-Operator-Takes-off-to-the-Clouds/index.html index ffafbe0094..502da3a61a 100644 --- a/blog/2023/10/19/Debezium-Operator-Takes-off-to-the-Clouds/index.html +++ b/blog/2023/10/19/Debezium-Operator-Takes-off-to-the-Clouds/index.html @@ -167,4 +167,4 @@ --bootstrap-server localhost:9092 \ --from-beginning \ --property print.key=true \ - --topic inventory.inventory.orders

    The Future and Our Request

This is it for now. Before the operator reaches full support, we intend to provide more detailed documentation and the ability to further configure the deployment with various settings, such as custom pull secrets to support customized Debezium Server images stored in secured registries.

There are further plans to improve the structure of the DebeziumServer resources, provide the ability to assemble a tailored distribution of Debezium Server declaratively, and maybe even improve our integration with Knative eventing. We are also planning improvements to the embedded engine and, consequently, Debezium Server, which will one day allow us to take advantage of the horizontal scaling capabilities of Kubernetes.

    You can Help us!

    We want to ask our wonderful Debezium community to test the operator and let us know what you like and dislike and what features you miss. This way, we can shape this component according to your needs, and together, we will bring Debezium closer to providing cloud-native CDC capabilities.

    Čecháček Jakub

Jakub is a Principal Software Engineer at Red Hat. He lives in the Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/10/26/debezium-2-5-alpha1-released/index.html b/blog/2023/10/26/debezium-2-5-alpha1-released/index.html index fb9c204026..fc3e8fe0d8 100644 --- a/blog/2023/10/26/debezium-2-5-alpha1-released/index.html +++ b/blog/2023/10/26/debezium-2-5-alpha1-released/index.html @@ -2,4 +2,4 @@ <groupId>io.debezium</groupId> <artifactId>debezium-connector-informix</artifactId> <version>2.5.0.Alpha1</version> -</dependency>

If you would like to contribute to the Informix connector, we have added a new repository under the Debezium organization, debezium-connector-informix.

    I’d like to thank Lars Johansson for this contribution and his collaboration with the team, kudos!

    MariaDB preview support

The community has leveraged the MySQL connector as an alternative for capturing changes from MariaDB for quite some time now; however, that compatibility has been primarily a best-effort affair.
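For reference, that community approach simply points the MySQL connector at a MariaDB host. A trimmed, illustrative sketch is shown below; the host, credentials, and topic names are placeholders, and the point of the work described next is to make setups like this officially supported rather than best effort.

# Illustrative excerpt: the MySQL connector configured against a MariaDB server (placeholder values).
connector.class=io.debezium.connector.mysql.MySqlConnector
database.hostname=mariadb.example.com
database.port=3306
database.user=debezium
database.password=dbz
database.server.id=184054
topic.prefix=mariadb1
table.include.list=inventory.customers
# Schema history settings used when running on Kafka Connect.
schema.history.internal.kafka.bootstrap.servers=kafka:9092
schema.history.internal.kafka.topic=schema-changes.mariadb1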

The Debezium 2.5 release stream aims to bring MariaDB to the forefront as a first-class connector by taking a very clear and methodical approach to incrementally check, validate, and eventually support MariaDB at the same capacity that we do MySQL. Our goal and hope is that we can do this within the scope of the MySQL connector proper; however, there is still quite a bit of ongoing investigation around GTID support that may influence the path forward.

This first preview build of Debezium 2.5 takes the first step: we’ve verified that the code works against a single MariaDB database deployment, that the test suite passes, and that any changes needed in the binlog client to support that deployment have been addressed. Our next step is to look into GTID support, which MariaDB offers, but with an approach that isn’t compatible with MySQL.

    Stay tuned for future builds as we continue to expand on this and we certainly welcome any early feedback.

    Oracle Streaming Metrics Changes

In previous builds of Debezium, there was a single Oracle streaming metrics bean that exposed all metrics options spanning all three streaming adapters. This often led to some confusion about which metrics are applicable to which streaming adapter, so we wanted to define a clear distinction.

    With Debezium 2.5, the Oracle streaming metrics beans have been split into three different implementations, one for each adapter type. For observability stacks, this change should be completely transparent unless you were previously gathering a metric for one adapter type while using another. In this case you’ll find that metric is no longer available.

    Specifically for LogMiner users, several metrics have been renamed and the old metrics have been deprecated. While you will still be able to use the old metric names in Debezium 2.5, these are scheduled for removal in a future 2.7+ build. The metrics that were deprecated and renamed are as follows:

Old/Deprecated Metric → New Metric

CurrentRedoLogFileName → CurrentLogFileNames
RedoLogStatus → RedoLogStatuses
SwitchCounter → LogSwitchCounter
FetchingQueryCount → FetchQueryCount
HoursToKeepTransactionInBuffer → MillisecondsToKeepTransactionsInBuffer
TotalProcessingTimeInMilliseconds → TotalBatchProcessingTimeInMilliseconds
RegisteredDmlCount → TotalChangesCount
MillisecondsToSleepBetweenMiningQuery → SleepTimeInMilliseconds
NetworkConnectionProblemsCounter → (no replacement)

    Debezium Server Operator

    The Debezium Server Operator for Kubernetes has been actively improved in this preview release of Debezium 2.5. Several improvements include:

    • Ability to set image pull secrets in the CRDs DBZ-6962

    • Ability to set resource limits in the CRDs DBZ-7052

    • Published OLM bundle scripts to Maven Central DBZ-6995

    • Support OKD/OpenShift catalog in OperatorHub release script DBZ-7010

    • Display name and descriptions metadata available in OLM bundle DBZ-7011

    • New metrics endpoint for gathering metrics DBZ-7053

    As we continue to improve the Debezium Server Operator for Kubernetes, we’d love to get your feedback.

    Community connectors

    Additionally, there were several enhancements to our community led connectors for Google Spanner and Vitess, which include the following changes:

    • Support for Cloud Spanner emulator with the Spanner connector DBZ-6845

    • Resumable snapshot support for the Vitess connector DBZ-7050

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Adding Debezium Server example using MySQL and GCP PubSub DBZ-4471

    • Refactor ElapsedTimeStrategy DBZ-6778

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Single quote replication includes escaped quotes for N(CHAR/VARCHAR) columns DBZ-6975

    • Provide configuration option to exclude extension attributes from a CloudEvent DBZ-6982

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • Debezium doesn’t compile with JDK 21 DBZ-6992

    • OLM bundle version for GA releases is invalid DBZ-6994

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Remove deprecated embedded engine code DBZ-7013

    • Enable replication slot advance check DBZ-7015

    • Add configuration option to CloudEventsConverter to retrieve id and type from headers DBZ-7016

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • Use optional schema for Timezone Converter tests DBZ-7020

    • DDL statement couldn’t be parsed DBZ-7030

    • Blocking ad-hoc snapshot is not really blocking for MySQL DBZ-7035

    • Fake ROTATE event on connection restart cleans metadata DBZ-7037

    • Consolidate resource labels and annotations DBZ-7064

    What’s next?

With the holiday season quickly approaching for many of us, you can expect the release cadence for Debezium 2.5 to be shorter than usual. We intend to release often as always, but expect that we will likely only do one more alpha build before we begin to transition into beta and release candidate builds. Our goal is to deliver Debezium 2.5.0.Final by mid-December, just in time for the holiday break.

    While this condensed timeline doesn’t give us lots of room, we have a lot still planned to include for Debezium 2.5. There is still lots of work to do on the MariaDB preview front, supporting GTID and multiple topology deployments. We’re also working on improving the parallelization experience with Debezium Engine, batch support for the JDBC sink connector, MongoDB improvements around large BSON documents, and much more. You can find all the details for our continued plans for Debezium 2.5 on our roadmap.

Lastly, I’d like to remind everyone about the Debezium community meeting. I will be distributing details about the next meeting in the coming weeks, and I would urge folks to be on the lookout and try to stop by our virtual event in early December. It’s a great way to meet the engineers working on Debezium, ask questions in an AMA-style format, and get insights not only into what is part of Debezium 2.5, but also into what lies ahead with Debezium 2.6 and 2.7 early next year!

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +</dependency>

    If you would like contribute to the Informix connector, we have added a new repository under the Debezium organization, debezium-connector-informix.

    I’d like to thank Lars Johansson for this contribution and his collaboration with the team, kudos!

    MariaDB preview support

    The community has leveraged the MySQL connector as an alternative to capture changes from MariaDB for quite some time now; however that compatibility was primarily best-case effort.

    The Debezium 2.5 release stream aims to bring MariaDB to the forefront as a first-class connector by taking a very clear and methodological approach to incrementally check, validate, and eventually support MariaDB at the same capacity that we do MySQL. Our goal and hope is that we can do this within the scope of the MySQL connector proper; however, there is still quite a bit of ongoing investigation around GTID support that may influence the path forward.

    This first preview build of Debezium 2.5 has taken the first step, we’ve verified that the code works against a single MariaDB database deployment, the test suite passes and we’ve addressed any changes needed with the Binlog client to support that deployment. Our next steps is to look into GTID support, which MariaDB supports but using an approach that isn’t compatible with MySQL.

    Stay tuned for future builds as we continue to expand on this and we certainly welcome any early feedback.

    Oracle Streaming Metrics Changes

    In previous builds of Debezium, there was a single Oracle streaming metrics bean that exposed all metrics options that spanned across all three streaming adapters. This often lead to some confusion about what metrics are applicable to which streaming adapter so we wanted to define a clear distinction in this case.

    With Debezium 2.5, the Oracle streaming metrics beans have been split into three different implementations, one for each adapter type. For observability stacks, this change should be completely transparent unless you were previously gathering a metric for one adapter type while using another. In this case you’ll find that metric is no longer available.

    Specifically for LogMiner users, several metrics have been renamed and the old metrics have been deprecated. While you will still be able to use the old metric names in Debezium 2.5, these are scheduled for removal in a future 2.7+ build. The metrics that were deprecated and renamed are as follows:

    Old/Deprecated Metric New Metric

    CurrentRedoLogFileName

    CurrentLogFileNames

    RedoLogStatus

    RedoLogStatuses

    SwitchCounter

    LogSwitchCounter

    FetchingQueryCount

    FetchQueryCount

    HoursToKeepTransactionInBuffer

    MillisecondsToKeepTransactionsInBuffer

    TotalProcessingTimeInMilliseconds

    TotalBatchProcessingTimeInMilliseconds

    RegisteredDmlCount

    TotalChangesCount

    MillisecondsToSleepBetweenMiningQuery

    SleepTimeInMilliseconds

    NetworkConnectionProblemsCounter

    No replacement

    Debezium Server Operator

    The Debezium Server Operator for Kubernetes has been actively improved in this preview release of Debezium 2.5. Several improvements include:

    • Ability to set image pull secrets in the CRDs DBZ-6962

    • Ability to set resource limits in the CRDs DBZ-7052

    • Published OLM bundle scripts to Maven Central DBZ-6995

    • Support OKD/OpenShift catalog in OperatorHub release script DBZ-7010

    • Display name and descriptions metadata available in OLM bundle DBZ-7011

    • New metrics endpoint for gathering metrics DBZ-7053

    As we continue to improve the Debezium Server Operator for Kubernetes, we’d love to get your feedback.

    Community connectors

    Additionally, there were several enhancements to our community led connectors for Google Spanner and Vitess, which include the following changes:

    • Support for Cloud Spanner emulator with the Spanner connector DBZ-6845

    • Resumable snapshot support for the Vitess connector DBZ-7050

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Adding Debezium Server example using MySQL and GCP PubSub DBZ-4471

    • Refactor ElapsedTimeStrategy DBZ-6778

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Single quote replication includes escaped quotes for N(CHAR/VARCHAR) columns DBZ-6975

    • Provide configuration option to exclude extension attributes from a CloudEvent DBZ-6982

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • Debezium doesn’t compile with JDK 21 DBZ-6992

    • OLM bundle version for GA releases is invalid DBZ-6994

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Remove deprecated embedded engine code DBZ-7013

    • Enable replication slot advance check DBZ-7015

    • Add configuration option to CloudEventsConverter to retrieve id and type from headers DBZ-7016

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • Use optional schema for Timezone Converter tests DBZ-7020

    • DDL statement couldn’t be parsed DBZ-7030

    • Blocking ad-hoc snapshot is not really blocking for MySQL DBZ-7035

    • Fake ROTATE event on connection restart cleans metadata DBZ-7037

    • Consolidate resource labels and annotations DBZ-7064

    What’s next?

With the holiday season quickly approaching for many of us, you can expect the release cadence for Debezium 2.5 to be shorter than usual. We intend to release often as always, but expect that we will likely do only one more alpha build before we begin to transition into beta and release candidate builds. Our goal is to deliver Debezium 2.5.0.Final by mid-December, just in time for the holiday break.

While this condensed timeline doesn’t give us lots of room, we still have a lot planned for Debezium 2.5. There is plenty of work to do on the MariaDB preview front, supporting GTID and multiple topology deployments. We’re also working on improving the parallelization experience with the Debezium Engine, batch support for the JDBC sink connector, MongoDB improvements around large BSON documents, and much more. You can find all the details of our continued plans for Debezium 2.5 on our roadmap.

Lastly, I’d like to remind everyone about the Debezium community meeting. I will be distributing details about the next meeting in the coming weeks, and I urge folks to be on the look-out and try to stop by our virtual event in early December. It’s a great way to meet the engineers working on Debezium, ask questions in an AMA-style format, and get insights not only into what is part of Debezium 2.5, but also what lies ahead with Debezium 2.6 and 2.7 early next year!

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/11/10/debezium-2-5-alpha2-released/index.html b/blog/2023/11/10/debezium-2-5-alpha2-released/index.html index cac6555a86..0390121d20 100644 --- a/blog/2023/11/10/debezium-2-5-alpha2-released/index.html +++ b/blog/2023/11/10/debezium-2-5-alpha2-released/index.html @@ -1 +1 @@ - Debezium 2.5.0.Alpha2 Released

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements: batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at the changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    Breaking changes

    While we strive to avoid breaking changes, sometimes those changes are inevitable to evolve in the right direction. This release includes a variety of breaking changes.

    MongoDB default connection mode changed

The upgrade to Debezium 2.5 brings a change to the MongoDB connector’s default connection mode. In previous builds, the default connection mode was replica_set; with Debezium 2.5 it is now sharded. If you were connecting to a sharded cluster without explicitly setting a connection mode, that is, relying on the default behavior, you must review your connector configuration and make adjustments. (DBZ-7108)

Overall, this change is part of a larger effort to remove the replica_set mode entirely. Please be sure to review your connector configurations for all MongoDB connectors when upgrading.

    This breaking change invalidates existing connector offsets and a new snapshot will be triggered by default when upgrading. If a snapshot is not needed or wanted, you will need to adjust your connector configuration’s snapshot.mode accordingly.
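If you prefer to keep the old behavior for now, you can pin the connection mode explicitly instead of relying on the default. The following is a minimal sketch of the relevant entries, shown as Java properties such as you might pass to the embedded engine; the property names come from the MongoDB connector documentation, while the connection string and topic prefix are placeholders.

import java.util.Properties;

public class MongoDbConnectionModeConfig {

    // A minimal sketch: pin the MongoDB connection mode rather than relying on the changed default.
    public static Properties build() {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
        config.setProperty("topic.prefix", "dbserver1");                           // placeholder logical name
        config.setProperty("mongodb.connection.string", "mongodb://mongo:27017");  // placeholder
        config.setProperty("mongodb.connection.mode", "replica_set");              // explicit; "sharded" is the new default
        config.setProperty("snapshot.mode", "never");                              // optional: skip the re-snapshot triggered by the offset change
        return config;
    }
}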

    Debezium Embedded Engine Deprecated APIs removed

Part of the team’s focus in Debezium 2.5 was to improve the Debezium Embedded Engine’s experience. With that goal in mind, we took this preview release as an opportunity to clean up the embedded engine’s API.

If your usage of the Debezium Embedded Engine relied on any of the previously deprecated APIs on EmbeddedEngine, you will find that those methods have been removed (DBZ-7100). The recommended path forward is to make sure that you’re using the DebeziumEngine interface provided by the debezium-api artifact.
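For reference, here is a minimal sketch of the DebeziumEngine API in use; the connector properties shown are placeholders, and only the engine wiring itself is the point of the example.

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class EngineExample {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "engine");
        props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        // ... remaining connector properties (database.hostname, topic.prefix, ...) omitted

        // Build the engine against the DebeziumEngine interface from debezium-api.
        DebeziumEngine<?> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record))
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);

        // ... later, on shutdown:
        // engine.close();
        // executor.shutdown();
    }
}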

    MySQL 5.7 support now best-effort

The MySQL community announced that MySQL 5.7 would reach its end of life at the end of October 2023, just last month. This means that the MySQL community has no plans to continue offering security or bug-fix patches for that edition of MySQL.

In accordance with this upstream announcement, Debezium, like other vendors, is making adjustments as well. To that end, starting with Debezium 2.5, we will no longer be testing or supporting MySQL 5.7 at full capacity; MySQL 5.7 thus enters what we call "best-effort" support. (DBZ-6874)

    CloudEvents - configuration option renamed

If you are presently using the CloudEvents converter to emit events that conform to the CloudEvents format, it’s important to note that the configuration option metadata.location was renamed to metadata.source. You will need to update your connector configurations to reflect this change with Debezium 2.5 and onward. (DBZ-7060)

    New features and improvements

Debezium 2.5 also introduces quite a number of improvements; let’s take a look at each of these individually.

    JDBC Sink Batch Support

Debezium first introduced the JDBC sink connector in March 2023 as part of Debezium 2.2. Over the last several months, this connector has seen numerous iterations to improve its stability, feature set, and capabilities. Debezium 2.5 builds atop those efforts, introducing batch writes. (DBZ-6317)

In previous versions, the connector processed each topic event separately; the new batch-write mode instead collects events into buckets and writes those changes to the target system using as few transaction boundaries as possible. This change increases the connector’s throughput and makes the interactions with the target database far more efficient.

    Seamless MongoDB large document handling

    Debezium has introduced several changes around large document processing in recent releases; however, those changes primarily focused on handling that use case with MongoDB 4 and 5. While these improvements certainly help for those older versions, the MongoDB community has introduced a way in MongoDB 6 to seamlessly deal with this at the database pipeline level.

    Debezium 2.5’s MongoDB connector now uses the $changeStreamSplitLargeEvent aggregation feature, introduced as part of MongoDB 6.0.9. This avoids the BSONObjectTooLarge exception when working with documents that would exceed the 16MB document size limit of MongoDB. This new feature is controlled by the oversize.handling.mode option, which defaults to fail. Please adjust this configuration if you would like to take advantage of this new, opt-in feature. (DBZ-6726)

    Debezium is simply utilizing an underlying feature of the MongoDB database. As such, the database still has some limitations discussed in the MongoDB documentation that could still lead to exceptions with large documents that don’t adhere to MongoDB’s split rules.
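For illustration, opting in might look like the sketch below, expressed as Java properties; the "split" value is the one the MongoDB connector documentation lists for this option, so please verify it (and the placeholder connection settings) against your Debezium version.

import java.util.Properties;

public class MongoDbOversizeHandlingConfig {

    // A minimal sketch: opt in to change-stream splitting for documents near the 16MB limit (MongoDB 6.0.9+).
    public static Properties build() {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
        config.setProperty("mongodb.connection.string", "mongodb://mongo:27017");  // placeholder
        config.setProperty("topic.prefix", "dbserver1");                           // placeholder
        config.setProperty("oversize.handling.mode", "split");                     // default is "fail"
        return config;
    }
}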

    MySQL 8.2 support

The MySQL community recently released a new innovation release, MySQL 8.2.0, at the end of October 2023. This new release has been tested with Debezium, and we’re happy to announce that we officially support it. (DBZ-6873)

    SQL Server Notification Improvements

    Debezium for SQL Server works by reading the changes captured by the database in what are called capture instances. These instances can come and go based on a user’s needs, and it can be difficult to know if Debezium has concluded its own capture process for a given capture instance.

    Debezium 2.5 remedies this problem by emitting a new notification aggregate called Capture Instance, allowing any observer to realize when a capture instance is no longer in use by Debezium. This new notification includes a variety of connector details including the connector’s name along with the start, stop, and commit LSN values. (DBZ-7043)

    Redis Schema History Retries now Limited

Debezium 2.5 introduces a new configuration option, schema.history.internal.redis.max.attempts, designed to limit the number of retry attempts while connecting to a Redis database when it becomes unavailable; previously, the connector simply retried forever. This new option defaults to 10 but is user configurable. (DBZ-7120)
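As a rough sketch, capping the retries could look like this; the Redis schema history class and address property names are taken from the Redis storage module documentation and should be verified against your version, and the address value is a placeholder.

import java.util.Properties;

public class RedisSchemaHistoryConfig {

    // A minimal sketch: cap Redis schema-history connection retries (new in Debezium 2.5).
    public static void apply(Properties config) {
        config.setProperty("schema.history.internal", "io.debezium.storage.redis.history.RedisSchemaHistory");
        config.setProperty("schema.history.internal.redis.address", "redis:6379");  // placeholder
        config.setProperty("schema.history.internal.redis.max.attempts", "5");      // default is 10
    }
}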

    SQL Server Driver Updates

SQL Server 2019 introduced the ability to specify column-specific sensitivity classifications to provide better visibility and protection for sensitive data. Unfortunately, the driver shipped with Debezium 2.4 and earlier does not support this feature. Debezium 2.5 ships the latest 12.4.2 SQL Server driver so that users can take advantage of this feature out of the box. (DBZ-7109)

    Debezium Server Kinesis Sink Improvements

Debezium Server Kinesis users will be happy to note that there have been some reliability improvements in the sink adapter with Debezium 2.5. The Kinesis sink will now automatically retry the delivery of a failed record up to a maximum of 5 attempts before the adapter triggers a failure. This should improve the sink adapter’s delivery reliability and help in situations where a batch of changes may overload the sink’s endpoint. (DBZ-7032)

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Add (integration) tests for Oracle connector-specific Debezium Connect REST extension DBZ-6763

    • Intermittent failure of MongoDbReplicaSetAuthTest DBZ-6875

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Mongodb tests in RHEL system testsuite are failing with DBZ 2.3.4 DBZ-6996

    • Use DebeziumEngine instead of EmbeddedEngine in the testsuite DBZ-7007

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • Update transformation property "delete.tombstone.handling.mode" to debezium doc DBZ-7062

    • JDBC sink connector not working with CloudEvent DBZ-7065

    • JDBC connection leak when error occurs during processing DBZ-7069

    • Some server tests fail due to @com.google.inject.Inject annotation DBZ-7077

    • Add MariaDB driver for testing and distribution DBZ-7085

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    • HttpIT fails with "Unrecognized field subEvents" DBZ-7092

    • MySQL parser does not conform to arithmetical operation priorities DBZ-7095

    • VitessConnectorIT.shouldTaskFailIfColumnNameInvalid fails DBZ-7104

    • When RelationalBaseSourceConnector#validateConnection is called with invalid config [inside Connector#validate()] can lead to exceptions DBZ-7105

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    Altogether, 33 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Anatolii Popov, Anisha Mohanty, Bob Roldan, Chris Cranford, Harvey Yue, Ilyas Ahsan, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Ondrej Babec, Rafael Câmara, René Kerner, Roman Kudryashov, Vadzim Ramanenka, Vojtech Juranek, and 蔡灿材!

    What’s next?

    As mentioned in our last release announcement, the cadence for Debezium 2.5 is condensed due to the upcoming holiday season. The next preview release for Debezium 2.5 will be our first and most likely only Beta release, later this month. We plan to conclude the Debezium 2.5 release series with a release candidate most likely the first week of December and a final release mid-way through December, just before the holiday break.

    The team is also working on a maintenance release of Debezium 2.4, due out late this week. This update to Debezium 2.4 will bring a host of bug fixes and stability improvements already in Debezium 2.5 to the 2.4 release stream.

    We are also moving forward on our review and process for MariaDB support. There will likely be some news on this in the coming weeks as we begin to find a path forward around this particular advancement. The team is also continuing the work on the Debezium Engine improvements, and much more. You can find all the details for our continued plans for Debezium 2.5 on our roadmap.

Lastly, there will be news later this week about the next Debezium community event. Please be on the look-out, as we’d love to see as many of our community members as possible drop by our virtual event in early December. It’s a great way to meet the engineers who work on Debezium and the community contributors, to ask questions, and to gain insights into what is part of Debezium 2.5 and the path forward to 2.6 and 2.7 next year.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.5.0.Alpha2 Released

    \ No newline at end of file diff --git a/blog/2023/11/16/debezium-2-4-1-final-released/index.html b/blog/2023/11/16/debezium-2-4-1-final-released/index.html index 8f576dd800..10fd3bded8 100644 --- a/blog/2023/11/16/debezium-2-4-1-final-released/index.html +++ b/blog/2023/11/16/debezium-2-4-1-final-released/index.html @@ -15,4 +15,4 @@ runtime: jmx: enabled: true - port: 1099

The endpoint currently supports neither authentication nor SSL, but both are on the roadmap and will be added as a supplemental change to this feature in a future release.
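To try the endpoint, any standard JMX client can attach to the advertised port. The sketch below connects over RMI from Java and lists the Debezium MBeans; it assumes the endpoint speaks plain RMI-based JMX on port 1099 as configured above, and the host name is a placeholder for however the service is exposed in your cluster.

import java.util.Set;

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class OperatorJmxCheck {

    public static void main(String[] args) throws Exception {
        // "debezium-server" is a placeholder for the host or service exposing port 1099.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://debezium-server:1099/jmxrmi");

        // No credentials or SSL, matching the current state of the endpoint.
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection connection = connector.getMBeanServerConnection();

            // List the Debezium metrics MBeans, e.g. debezium.mysql:type=connector-metrics,...
            Set<ObjectName> names = connection.queryNames(new ObjectName("debezium.*:*"), null);
            names.forEach(System.out::println);
        }
    }
}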

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Missing events from Oracle 19c DBZ-6963

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • OLM bundle version for GA releases is invalid DBZ-6994

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • Use optional schema for Timezone Converter tests DBZ-7020

    • DDL statement couldn’t be parsed DBZ-7030

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Update operator dependencies and add qosdk platform bom DBZ-7048

    • Field exclusion does not work with events of removed fields DBZ-7058

    • Consolidate resource labels and annotations DBZ-7064

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    Altogether, 30 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Anisha Mohanty, Bertrand Paquet, Bob Roldan, Chris Cranford, David Remy, Don Seiler, Harvey Yue, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Nancy Xu, Robert Roldan, Sergey Eizner, Thomas Thornton, Vojtech Juranek, and adityajain311995!

    What’s next?

    As mentioned in our last release announcement, the cadence for Debezium 2.5 is condensed due to the upcoming holiday season. The next preview release for Debezium 2.5 will be our first and most likely only Beta release, later this month. We plan to conclude the Debezium 2.5 release series with a release candidate most likely the first week of December and a final release mid-way through December, just before the holiday break.

    We are also moving forward on our review and process for MariaDB support. There will likely be some news on this in the coming weeks as we begin to find a path forward around this particular advancement. The team is also continuing the work on the Debezium Engine improvements, and much more. You can find all the details for our continued plans for Debezium 2.5 on our roadmap.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + port: 1099

    \ No newline at end of file diff --git a/blog/2023/12/04/debezium-2-5-beta1-released/index.html b/blog/2023/12/04/debezium-2-5-beta1-released/index.html index 74f4fa3710..0b90dbb4f0 100644 --- a/blog/2023/12/04/debezium-2-5-beta1-released/index.html +++ b/blog/2023/12/04/debezium-2-5-beta1-released/index.html @@ -28,4 +28,4 @@ "field.exclude.list": "customers:address" }

The format of the field include/exclude lists is [<topic-name>:]<field-name>, where the topic name is optional and can be omitted if you want to exclude the field (here, address) for events from all topics. Please see the JDBC sink connector configuration documentation for more details.
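For illustration, here are the two forms side by side, shown as Java map entries that mirror what you would place in the connector’s JSON configuration; the topic and field names are the ones from the example above.

import java.util.Map;

public class JdbcSinkFieldExclusion {

    // Exclude "address" only for events coming from the "customers" topic.
    public static Map<String, String> topicScoped() {
        return Map.of("field.exclude.list", "customers:address");
    }

    // Omit the topic name to exclude the "address" field for events from all topics.
    public static Map<String, String> allTopics() {
        return Map.of("field.exclude.list", "address");
    }
}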

    Debezium Server - EventHubs partitioning

In earlier versions of Debezium Server, users could specify a fixed partition-id to stream all changes to a single partition, or provide a static partition-key that is set on all batch operations, which ultimately lends itself to streaming all changes to the same target partition. There are situations where this may be helpful, but it more often leads to performance concerns for downstream processing.

Debezium 2.5 adjusts this behavior in order to improve performance. By default, when neither a partitionid nor a partitionkey is defined, the Event Hubs sink sends events to all available partitions using a round-robin technique. Events can be forced into a single, fixed partition by specifying a partitionid. Alternatively, a partitionkey can be provided to supply a fixed partition key that is used to route all events to a specific partition.

If additional partition routing requirements are necessary, you can now combine this with the PartitionRouting SMT to accomplish such tasks. For more details, please see the Event Hubs documentation.

    Debezium Server - RabbitMQ Streams sink

RabbitMQ introduced Streams in version 3.9, which utilizes a fast and efficient protocol that can be combined with AMQP 0.9.1 to support large fan-outs, replay and time travel, and large data sets with extremely high throughput. Debezium 2.5 takes advantage of this by introducing a new native Streams sink (DBZ-6703). To get started with this new implementation, configure the Debezium Server sink as follows:

    debezium.sink.type=rabbitmqstream
     debezium.sink.rabbitmqstream.connection.host=<hostname of RabbitMQ>
    -debezium.sink.rabbitmqstream.connection.port=<port of RabbitMQ>

Additionally, if you need to pass any other connection parameters to the RabbitMQ connection, you can do so by adding them to the configuration with the prefix debezium.sink.rabbitmqstream.connection.; they will be passed through to the connection.

    Please see the Debezium Server RabbitMQ documentation for more details.

    Debezium Operator - Service Account for CRDs

In previous versions of Debezium, it was not possible to use a service account named differently than the predefined one. This made the process a bit cumbersome: while you could grant roles and authorizations to the predefined account separately, it meant you had to use that predefined service account rather than one you may already wish to use.

    Debezium 2.5 simplifies this process, allowing you to now use your own, custom service account (DBZ-7111).

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Generate sundrio fluent builders for operator model DBZ-6550

    • Convert operator source into multi module project DBZ-6551

    • Implement "validate filters" endpoint in connector-specific Connect REST extensions DBZ-6762

    • Test Avro adjustment for MongoDb connector and ExtractNewDocumentState SMT DBZ-6809

    • Implement IT tests against Cloud Spanner emulator in main repo. DBZ-6906

    • The DefaultDeleteHandlingStrategy couldn’t add the rewrite "__deleted" field to a non-struct value DBZ-7066

    • Implement strategy pattern for MariaDB and MySQL differences DBZ-7083

    • Debezium server has no default for offset.flush.interval.ms DBZ-7099

    • Failed to authenticate to the MySQL database after snapshot DBZ-7132

    • Run MySQL CI builds in parallel DBZ-7135

    • Failure reading CURRENT_TIMESTAMP on Informix 12.10 DBZ-7137

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    • outbox.EventRouter SMT throws NullPointerException when there is a whitespace in fields.additional.placement value DBZ-7142

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • Add matrix strategy to workflows DBZ-7154

    • Add Unit Tests for ServiceAccountDependent Class in Debezium Operator Repository DBZ-7155

    • JsonSerialisation is unable to process changes from sharded collections with composite sharding key DBZ-7157

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Fail fast during deserialization if a value is not a CloudEvent DBZ-7159

    • Correctly calculate Max LSN DBZ-7175

    • Upgrade to Infinispan 14.0.20 DBZ-7187

    • Upgrade Outbox Extension to Quarkus 3.5.3 DBZ-7188

• Fix DebeziumMySqlConnectorResource not using the new MySQL adapter structure to support different MySQL flavors DBZ-7179

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • Enable ability to stream changes against Oracle 23c for LogMiner DBZ-7194

    • Add modify range_partitions to modify_table_partition rule in parsing PL/SQL DBZ-7196

    • MongoDB streaming pauses for Blocking Snapshot only when there is no event DBZ-7206

    • Handle Drop Tablespace in PL/SQL DBZ-7208

    • Upgrade logback to 1.2.12 DBZ-7209

    • NPE on AbstractInfinispanLogMinerEventProcessor.logCacheStats DBZ-7211

    What’s next?

We have about three weeks before the team takes a break for the holidays, so this will be our one and only beta release for Debezium 2.5. We intend to release our release candidate in the middle of next week, with the final release due out the week before the holiday break.

    The team is also working on a maintenance release of Debezium 2.4, due out later this week. This update to Debezium 2.4 will bring a host of bug fixes and stability improvements already in Debezium 2.5 to the 2.4 release stream.

The team is also finalizing our roadmap for 2024. I will post the details on our website later in the week, along with a complete overview of Debezium 2.5 and our 2024 roadmap as part of our Debezium Community Event on Tuesday, December 12th at 10am ET. All are welcome to attend on Google Meet.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +debezium.sink.rabbitmqstream.connection.port=<port of RabbitMQ>

    \ No newline at end of file diff --git a/blog/2023/12/14/debezium-2-5-cr1-released/index.html b/blog/2023/12/14/debezium-2-5-cr1-released/index.html index b49d523f7e..d5915c300d 100644 --- a/blog/2023/12/14/debezium-2-5-cr1-released/index.html +++ b/blog/2023/12/14/debezium-2-5-cr1-released/index.html @@ -1 +1 @@ - Debezium 2.5.0.CR1 Released

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    Breaking changes

    While we strive to avoid breaking changes, sometimes such changes are inevitable in order to evolve in the right direction. This release includes several breaking changes.

    Schema name for Cloud Event headers

    The schema name prefix and letter casing for Cloud Event headers were not consistent with the payload name. The schema name was aligned so that both headers and payload share the same namespace and follow the same rules for letter casing (DBZ-7216).

    MySQL BIT default length

    The MySQL BIT datatype did not have an implicit length when none was set. This was incorrect, as the default length when none is provided is 1 (DBZ-7230).

    New features and improvements

    Debezium 2.5 also introduces more improvements and features; let’s take a look at each individually.

    Re-select columns

    In some cases, because of the way that certain source databases function, when a Debezium connector emits a change event, the event might exclude values for specific column types. For example, values for TOAST columns in PostgreSQL, LOB columns in Oracle, or Extended String columns in Oracle Exadata, might all be excluded.

    Debezium 2.5 introduces the ReselectColumnsPostProcessor, which provides a way to re-select one or more columns from a database table and fetch their current state. You can configure the post processor to re-select the following column types:

    • null columns.

    • columns that contain the unavailable.value.placeholder sentinel value.

    Configuring a PostProcessor is similar to configuring a CustomConverter or Transformation, except that it works on the mutable payload’s Struct rather than the SourceRecord.
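
    As a rough illustration, a connector configuration might enable the post processor along these lines. This is only a sketch based on the description above; the class name and the reselect.* property keys are assumptions, so please check the post processor documentation for the exact names:

        # register a post processor under the prefix "reselector" (keys below are illustrative)
        post.processors=reselector
        reselector.type=io.debezium.processors.reselect.ReselectColumnsPostProcessor
        # re-select rows whose captured event contains null or placeholder values for these columns
        reselector.reselect.columns.include.list=inventory.orders:payload
        reselector.reselect.null.values=true
        reselector.reselect.unavailable.values=true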

    Debezium Server - StreamNameMapper for Apache Kafka sink

    The Kafka sink behaviour can now be modified by custom logic that provides alternative implementations for specific functionality. When no alternative implementation is available, the default one is used.

    For more details, please see the Apache Kafka sink injection points documentation.

    INSERT/DELETE semantics for incremental snapshot watermarking

    The property incremental.snapshot.watermarking.strategy has been introduced to let users choose the watermarking strategy to use during an incremental snapshot.

    The insert_insert approach (the old behavior) has Debezium create two entries in the signaling data collection for each chunk during the snapshot: one to signal the opening of the snapshot window and another to mark its closure.

    On the other hand, with the insert_delete option, a single entry is written in the signaling data collection for each chunk at the beginning of the window. After completion, this entry is removed, and no corresponding entry is added to signify the closure of the snapshot window. This method aids in more efficient management of the signaling data collection.

    For more details, please see the Connector properties section of the connector you are interested in.
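
    As a small example, opting in to the new strategy is a single property on the connector; the property name is the one introduced above, and insert_insert remains the default if nothing is set:

        # use a single INSERT followed by a DELETE per chunk to keep the signaling collection small
        incremental.snapshot.watermarking.strategy=insert_delete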

    Debezium Server - AWS SQS sink

    Amazon Simple Queue Service (Amazon SQS) is a distributed message queuing service. It supports programmatic sending of messages via web service applications as a way to communicate over the Internet. SQS is intended to provide a highly scalable hosted message queue that resolves issues arising from the common producer–consumer problem or connectivity between producer and consumer.

    Debezium 2.5 adds the ability to send change events to Amazon SQS through a new Debezium Server sink.
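
    A minimal Debezium Server configuration sketch might look like the following; the sink type name and the queue property key are assumptions made for illustration, so consult the Debezium Server sink documentation for the exact keys:

        # hypothetical application.properties fragment for the SQS sink
        debezium.sink.type=sqs
        # placeholder property name and queue URL, shown only to illustrate the shape of the config
        debezium.sink.sqs.queue.url=https://sqs.us-east-1.amazonaws.com/123456789012/debezium-events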

    Oracle LOB behavior

    Debezium 2.5 aligns LOB behavior in snapshot and streaming. When lob.enabled is set to false, the unavailable value placeholder will be explicitly included during snapshot to match the behavior of streaming.
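
    For instance, an Oracle connector configuration with LOB support disabled now emits the placeholder for LOB columns in snapshot events as well; the placeholder value shown here is assumed to be the connector’s default and can be overridden:

        # LOB columns are not captured; both snapshot and streaming events carry the placeholder
        lob.enabled=false
        unavailable.value.placeholder=__debezium_unavailable_value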

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Oracle abandoned transaction implementation bug causes OoM DBZ-7236

    • Add Grammar Oracle Truncate Cluster DBZ-7242

    • Length value is not removed when changing a column’s type DBZ-7251

    • MongoDB table/collection snapshot notification contain incorrect offsets DBZ-7252

    • Broken support for multi-namespace watching DBZ-7254

    • Add tracing logs to track execution time for Debezium JDBC connector DBZ-7217

    • Validate & clarify multiple archive log destination requirements for Oracle DBZ-7218

    • Upgrade logback to 1.2.13 DBZ-7232

    Altogether, 16 issues were fixed for this release. A big thank you to all the contributors from the community who worked on this release: Bob Roldan, Chris Cranford, Gunnar Morling, Harvey Yue, Ilyas Ahsan, Indra Shukla, Jakub Cechacek, Jiabao Sun, Jiri Kulhanek, Jiri Pechanec, Jordan Pittier, Mario Fiore Vitale, Nils Hartmann, Roman Kudryashov, Sebastiaan Knijnenburg, Tudor Plugaru, V K, and Zhongqiang Gong!

    What’s next?

    We have just over a week before the team takes a break for the holidays, and so we are preparing for the Debezium 2.5 final release. We intend to release it the week before the holiday break.

    The team has also finalized the roadmap for 2024; here’s a sneak peek at some highlights (and remember, this is just the tip of the iceberg!):

    • Asynchronous-based processing in Debezium Engine

    • Official MariaDB connector

    • User-friendly offset manipulation (i.e., starting at a specific position in the transaction logs)

    • Sink connector for MongoDB

    For more details, please check out our roadmap for everything planned for Debezium 2.6 and beyond.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. Until next time, stay warm out there!

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/12/20/JDBC-sink-connector-batch-support/index.html b/blog/2023/12/20/JDBC-sink-connector-batch-support/index.html index 7349361885..016a69acb6 100644 --- a/blog/2023/12/20/JDBC-sink-connector-batch-support/index.html +++ b/blog/2023/12/20/JDBC-sink-connector-batch-support/index.html @@ -7,4 +7,4 @@ `flight` longtext, `metar` longtext, `flight_distance` double DEFAULT NULL -)

    Test plan

    We planned to execute these tests:

    • 100K events from a single table

      • MySQL batch vs without batch

    • 100K events from three different tables

      • MySQL batch vs without batch

    • 1M events from a single table

      • MySQL batch with batch size: 500, 1000, 5000, 10000 vs without batch

      • MySQL batch with batch size: 500, 1000, 5000, 10000 with JSONConverter

      • MySQL batch with batch size: 500, 1000, 5000, 10000 with Avro

      • MySQL batch with batch size: 500, 1000, 5000, 10000 with Avro and no index on destination table

    Figure 1. 100K events from a single table: batch vs. no batch

    Figure 1 illustrates the total execution time required to process 100,000 events from a single table, comparing the MySQL connector with and without batch support.

    Despite the default value being 500 for both batch.size and consumer.max.poll.records, the actual observed batch size was reduced to 337 records due to payload size considerations.

    We can observe, as expected, that the Debezium JDBC connector with batch support is faster.
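
    For reference, these are the two knobs discussed above. batch.size is a Debezium JDBC sink connector property, while the consumer override shown is the standard per-connector form used with Kafka Connect (depending on how the sink is deployed, the plain consumer.max.poll.records setting mentioned earlier may apply instead); the values are simply the ones exercised later in the test, not recommendations:

        # allow the sink to group up to 1000 records per batch
        batch.size=1000
        # let the Connect consumer hand the task up to 1000 records per poll
        consumer.override.max.poll.records=1000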

    Figure 2. 100K events spread across three tables: batch vs. no batch

    In Figure 2, we observe that splitting 100,000 events into three tables does not impact the results. The Debezium JDBC connector with batch support remains faster compared to the non-batch version.

    Figure 3. 1M events from a single table: batch vs. no batch

    In Figure 3, it is evident that the performance gain becomes more pronounced with 1,000,000 events. The Debezium JDBC connector with batch support took approximately 7 minutes to insert all events, with an average throughput of 2300 eps, while the process without batch support took 570 minutes (9.5 hours). Therefore, the Debezium JDBC connector with batch support is 79 times faster than the version without batch support.

    Figure 4. 1M events with different batch.size values (JSONConverter)

    In Figure 4, we observe the behavior of the Debezium JDBC connector using the org.apache.kafka.connect.json.JsonConverter converter and writing to MySQL with different batch.size settings. While the initial differences are noticeable, it becomes apparent that the throughput continues to slow down. On average, all the batch.size configurations take about 7 minutes to process all events.

    This raised a concern for us. After profiling the run, we identified another issue: event deserialization. This was most likely the reason why increasing batch.size did not scale.

    Although serialization improved scalability, we still lack an answer regarding the slowdown of EPS during the test run. One hypothesis could involve a certain type of buffer somewhere.

    Figure 5. 1M events with different batch.size values (Avro)

    We then conducted experiments with Avro, and as depicted in Figure 5, the results show a significant improvement. As expected, processing 1,000,000 events with batch.size=500 is slower than with batch.size=10000. Notably, in our test configuration, the optimal value for batch.size is 1000, resulting in the fastest processing time.

    Although the results are better compared to JSON, there is still some performance degradation.

    To identify potential bottlenecks in the code, we added some metrics and found that the majority of time was spent executing batch statements on the database.

    Further investigation revealed that our table had an index defined on the primary key, which was slowing down the inserts.

    Figure 6. 1M events with different batch.size values (Avro, no index on the destination table)

    In Figure 6 you can see the improved performance with Avro and without the primary key index. The performance boost with a high batch.size value is also evident.

    Conclusion

    We’ve explored how adjusting the batch.size can enhance the performance of the Debezium JDBC connector and discussed the proper configuration for maximizing its benefits. Equally crucial is adhering to performance tips and general guidelines for efficient inserts tailored to your specific database.

    While some of these settings may be specific to certain databases, several general principles apply across the majority of them.

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2023/12/21/debezium-2-5-final-released/index.html b/blog/2023/12/21/debezium-2-5-final-released/index.html index 9760b987d0..f501a5e920 100644 --- a/blog/2023/12/21/debezium-2-5-final-released/index.html +++ b/blog/2023/12/21/debezium-2-5-final-released/index.html @@ -32,4 +32,4 @@ <groupId>io.debezium</groupId> <artifactId>debezium-connector-informix</artifactId> <version>2.5.0.Final</version> -</dependency>

    If you would like to contribute to the Informix connector, we have added a new repository under the Debezium organization, debezium-connector-informix.

    I’d like to thank Lars Johansson for this contribution and his collaboration with the team, kudos!

    MariaDB

    Preview support

    The community has leveraged the MySQL connector as an alternative way to capture changes from MariaDB for quite some time now; however, that compatibility has been primarily best-effort.

    The Debezium 2.5 release stream aims to bring MariaDB to the forefront as a first-class connector by taking a very clear and methodical approach to incrementally check, validate, and eventually support MariaDB to the same extent that we do MySQL. Our goal and hope is that we can do this within the scope of the MySQL connector proper; however, there is still quite a bit of ongoing investigation around GTID support that may influence the path forward.

    This first preview build of Debezium 2.5 has taken the first step: we’ve verified that the code works against a single MariaDB database deployment, the test suite passes, and we’ve addressed the changes needed in the binlog client to support that deployment. Our next step is to look into GTID support, which MariaDB supports but with an approach that isn’t compatible with MySQL.

    Stay tuned for future builds as we continue to expand on this and we certainly welcome any early feedback.

    GTID support

    Both MySQL and MariaDB support what are called Global Transaction Identifiers, or GTIDs. These are used in replication to uniquely identify transactions across a cluster. The implementation details differ significantly between MySQL and MariaDB, and in earlier versions of Debezium we only supported GTIDs with MySQL.

    With Debezium 2.5, we are taking another step forward by introducing GTID support for MariaDB as part of the MySQL connector offering. To take advantage of this behavior, you will need to use the MariaDB driver rather than the MySQL driver, i.e. a JDBC connection URL prefixed with jdbc:mariadb rather than jdbc:mysql. With that in place, you can work with MariaDB and GTIDs just as you would with MySQL (DBZ-1482).
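
    A hypothetical connector configuration fragment illustrating the switch might look like this; only the jdbc:mariadb prefix comes from the text above, while the property names used to carry the protocol and driver class are assumptions, so verify them against the MySQL connector documentation:

        connector.class=io.debezium.connector.mysql.MySqlConnector
        # assumed property names for selecting the MariaDB protocol and JDBC driver
        database.protocol=jdbc:mariadb
        database.jdbc.driver=org.mariadb.jdbc.Driver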

    Other changes

    Altogether, 7 issues were fixed in this release and a total of 169 issues across all the Debezium 2.5 releases.

    • Adding Debezium Server example using MySQL and GCP PubSub DBZ-4471

    • Refactor ElapsedTimeStrategy DBZ-6778

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Single quote replication includes escaped quotes for N(CHAR/VARCHAR) columns DBZ-6975

    • Provide configuration option to exclude extension attributes from a CloudEvent DBZ-6982

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • Debezium doesn’t compile with JDK 21 DBZ-6992

    • OLM bundle version for GA releases is invalid DBZ-6994

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Remove deprecated embedded engine code DBZ-7013

    • Enable replication slot advance check DBZ-7015

    • Add configuration option to CloudEventsConverter to retrieve id and type from headers DBZ-7016

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • Use optional schema for Timezone Converter tests DBZ-7020

    • DDL statement couldn’t be parsed DBZ-7030

    • Blocking ad-hoc snapshot is not really blocking for MySQL DBZ-7035

    • Fake ROTATE event on connection restart cleans metadata DBZ-7037

    • Consolidate resource labels and annotations DBZ-7064

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Add (integration) tests for Oracle connector-specific Debezium Connect REST extension DBZ-6763

    • Intermittent failure of MongoDbReplicaSetAuthTest DBZ-6875

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Mongodb tests in RHEL system testsuite are failing with DBZ 2.3.4 DBZ-6996

    • Use DebeziumEngine instead of EmbeddedEngine in the testsuite DBZ-7007

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • Update transformation property "delete.tombstone.handling.mode" to debezium doc DBZ-7062

    • JDBC sink connector not working with CloudEvent DBZ-7065

    • JDBC connection leak when error occurs during processing DBZ-7069

    • Some server tests fail due to @com.google.inject.Inject annotation DBZ-7077

    • Add MariaDB driver for testing and distribution DBZ-7085

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    • HttpIT fails with "Unrecognized field subEvents" DBZ-7092

    • MySQL parser does not conform to arithmetical operation priorities DBZ-7095

    • VitessConnectorIT.shouldTaskFailIfColumnNameInvalid fails DBZ-7104

    • When RelationalBaseSourceConnector#validateConnection is called with invalid config [inside Connector#validate()] can lead to exceptions DBZ-7105

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    • Generate sundrio fluent builders for operator model DBZ-6550

    • Convert operator source into multi module project DBZ-6551

    • Implement "validate filters" endpoint in connector-specific Connect REST extensions DBZ-6762

    • Test Avro adjustment for MongoDb connector and ExtractNewDocumentState SMT DBZ-6809

    • Implement IT tests against Cloud Spanner emulator in main repo. DBZ-6906

    • The DefaultDeleteHandlingStrategy couldn’t add the rewrite "__deleted" field to a non-struct value DBZ-7066

    • Implement strategy pattern for MariaDB and MySQL differences DBZ-7083

    • Debezium server has no default for offset.flush.interval.ms DBZ-7099

    • Failed to authenticate to the MySQL database after snapshot DBZ-7132

    • Run MySQL CI builds in parallel DBZ-7135

    • Failure reading CURRENT_TIMESTAMP on Informix 12.10 DBZ-7137

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    • outbox.EventRouter SMT throws NullPointerException when there is a whitespace in fields.additional.placement value DBZ-7142

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • Add matrix strategy to workflows DBZ-7154

    • Add Unit Tests for ServiceAccountDependent Class in Debezium Operator Repository DBZ-7155

    • JsonSerialisation is unable to process changes from sharded collections with composite sharding key DBZ-7157

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Fail fast during deserialization if a value is not a CloudEvent DBZ-7159

    • Correctly calculate Max LSN DBZ-7175

    • Upgrade to Infinispan 14.0.20 DBZ-7187

    • Upgrade Outbox Extension to Quarkus 3.5.3 DBZ-7188

    • Fix DebeziumMySqlConnectorResource not using the new MySQL adatper structure to support different MySQL flavors DBZ-7179

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • Enable ability to stream changes against Oracle 23c for LogMiner DBZ-7194

    • Add modify range_partitions to modify_table_partition rule in parsing PL/SQL DBZ-7196

    • MongoDB streaming pauses for Blocking Snapshot only when there is no event DBZ-7206

    • Handle Drop Tablespace in PL/SQL DBZ-7208

    • Upgrade logback to 1.2.12 DBZ-7209

    • NPE on AbstractInfinispanLogMinerEventProcessor.logCacheStats DBZ-7211

    • Oracle abandoned transaction implementation bug causes OoM DBZ-7236

    • Add Grammar Oracle Truncate Cluster DBZ-7242

    • Length value is not removed when changing a column’s type DBZ-7251

    • MongoDB table/collection snapshot notification contain incorrect offsets DBZ-7252

    • Broken support for multi-namespace watching DBZ-7254

    • Add tracing logs to track execution time for Debezium JDBC connector DBZ-7217

    • Validate & clarify multiple archive log destination requirements for Oracle DBZ-7218

    • Upgrade logback to 1.2.13 DBZ-7232

    • Add configuration option to CloudEventsConverter to customize schema type name DBZ-7235

    • Support persistent history for snapshot requests for the kafka signal topic. DBZ-7164

    • Change metrics endpoint of Connect REST Extensions to use the MBeanServerv directly instead of HTTP calls to the Jolokia endpoint DBZ-7177

    • Metrics endpoint must handle connectors with multiple tasks (SQL Server) DBZ-7178

    • DDL GRANT statement couldn’t be parsed DBZ-7213

    • Debezium Oracle plugin 2.5.0 Beta does not support Oracle 11g DBZ-7257

    • Error during snapshot with multiple snapshot threads will not properly abort snasphostting DBZ-7264

    • MySQL RDS UPDATE queries not ignored DBZ-7271

    • Leaking JDBC connections DBZ-7275

    • IncrementalSnapshotCaseSensitiveIT#insertDeleteWatermarkingStrategy fails DBZ-7276

    • Debezium MySQL could not parse certain grant privileges. DBZ-7277

    • Add PL/SQL Parser for Create Table Memoptimize DBZ-7279

    • Support for Creating EDITIONABLE or NONEDITIONABLE Packages DBZ-7283

    • Add PL/SQL Parser for Alter Table Memoptimize DBZ-7268

    • Move metrics endpoint from UI backend to the Debezium Connect REST extension/s DBZ-6764

    • website-builder image fails with newer bundler DBZ-7269

    • Vitess connector build fails due to invalid GPG key DBZ-7280

    Outlook & What’s next?

    Debezium 2.5 was a feature-packed milestone for the team, so after a few drinks and some celebration, the plan is to turn our focus toward what is ahead for the 2.6 release. We have already had our second Debezium community meeting, discussed our roadmap, and we’re more than eager to get started.

    If you have any ideas or suggestions for what you’d like to see included in Debezium 2.6, please provide that feedback on our mailing list or in our Zulip chat.

    Merry Christmas and Happy New Year 2024!

    Onwards and Upwards!

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/01/11/Debezium-and-TimescaleDB/index.html b/blog/2024/01/11/Debezium-and-TimescaleDB/index.html index 1ebd94eae6..56b5820748 100644 --- a/blog/2024/01/11/Debezium-and-TimescaleDB/index.html +++ b/blog/2024/01/11/Debezium-and-TimescaleDB/index.html @@ -71,4 +71,4 @@ _timescaledb_internal._hyper_1_1_chunk (1 row) -postgres=# SELECT compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');

    Messages are written to timescaledb._timescaledb_internal._compressed_hypertable_3.

    Tear down the environment

    docker-compose -f docker-compose-timescaledb.yaml down

    Conclusion

    In this post, we have demonstrated capturing data from the TimescaleDB time-series database and processing it with the TimescaleDb SMT. We have shown how messages are routed and enriched depending on whether hypertables or continuous aggregates act as the source of the data.

    Jiri Pechanec

    Jiri is a software developer (and a former quality engineer) at Red Hat. He spent most of his career with Java and system integration projects and tasks. He lives near Brno, Czech Republic.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/01/22/debezium-2.6-alpha1-released/index.html b/blog/2024/01/22/debezium-2.6-alpha1-released/index.html index fc982d8d10..2e34adc278 100644 --- a/blog/2024/01/22/debezium-2.6-alpha1-released/index.html +++ b/blog/2024/01/22/debezium-2.6-alpha1-released/index.html @@ -1 +1 @@ - Debezium 2.6.0.Alpha1 Released

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment to dive into these new features and understand how to use them to improve your change data capture experience.

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    MongoDB
    • The MongoDB connector no longer supports the replica_set mode (DBZ-7260). This feature has been deprecated for several versions, and work has been ongoing throughout Debezium 2.x to remove it. If you are using replica_set mode, you will need to adjust your configuration when moving to Debezium 2.6+; see the sketch below.
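
    As a rough sketch of that adjustment, the connection-string based configuration is the supported way to point the connector at a replica set or sharded cluster in recent releases; the host names and topic prefix below are placeholders:

        connector.class=io.debezium.connector.mongodb.MongoDbConnector
        # connect via a MongoDB connection string instead of the removed replica_set mode
        mongodb.connection.string=mongodb://mongo1:27017,mongo2:27017/?replicaSet=rs0
        topic.prefix=dbserver1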

    Re-select Columns Post Processor
    • The re-select columns post processor used a key based on message.key.columns when building its query. This is not correct for most tables with primary keys, so the default behavior has changed and the table’s primary key is now used by default. A new configuration option, reselect.use.event.key, was introduced to let the user choose between the primary key and the generated key (DBZ-7358).

    Improvements and changes

    New Matching Collections API added

    One of the team’s ongoing tasks is the migration of the Debezium UI’s backend into the main Debezium repository. One of the benefits of doing this is that we can identify where there is code overlap between a connector’s runtime and the UI, and develop interface contracts to expose this shared data.

    Thanks to a community contribution for DBZ-7167, the RelationalBaseSourceConnector contract has been adjusted and a new method introduced to return a list of table names that match the connector’s specific configuration. Any connector that implements this abstract base class will need to implement this new method.

    CloudEvents schema name customization

    When using a schema registry, event schemas must be registered under a name so that pipelines can look them up later. The same applies when pairing CloudEvents-formatted messages with a schema registry, and in Debezium 2.6 you can explicitly control how that name is registered.

    By default, the schema for a CloudEvent message is automatically generated by the converter. However, if the auto-generated schema names are not sufficient, you can adjust the configuration by specifying dataSchemaName, which can be set either to generate (the default behavior) or to header to pull the schema name directly from the specified event header field.
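
    A hedged sketch of what this might look like when the CloudEvents converter is used as a connector’s value converter follows; the option name is taken from this post, but the exact prefix under which it is passed is an assumption, so check the CloudEvents converter documentation before relying on it.

    // Sketch only: uses Debezium's CloudEvents converter and asks it to take the data
    // schema name from an event header rather than generating it ("generate" is the default).
    java.util.Properties config = new java.util.Properties();
    config.setProperty("value.converter", "io.debezium.converters.CloudEventsConverter");
    config.setProperty("value.converter.dataSchemaName", "header");   // assumed prefix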

    Oracle Infinispan cache improvements

    The Debezium Oracle connector maintains a buffer of all in-flight transactions, and this buffer can be allocated off-heap using Infinispan. The user configuration can specify that if an in-flight transaction lasts longer than a given number of milliseconds, the transaction is abandoned and discarded by the buffer. This means that the transaction will be forgotten and not emitted by the connector.

    In order to improve metrics integration with frameworks like Grafana and Prometheus, a new JMX metric, AbandonedTransactionCount, was added to track the number of transactions that are abandoned by the connector during its runtime.
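
    If you want to poll the new metric yourself rather than through Grafana or Prometheus, a small JMX sketch follows. The ObjectName pattern mirrors Debezium’s usual connector-metrics naming, with server matching the connector’s topic.prefix; treat the exact name as an assumption and adjust it for your deployment.

    // Sketch only: reads AbandonedTransactionCount over JMX from within the same JVM
    // that runs the connector (for a remote Connect worker, use a JMXConnector instead).
    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class AbandonedTransactionCheck {
        public static void main(String[] args) throws Exception {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName metrics = new ObjectName(
                    "debezium.oracle:type=connector-metrics,context=streaming,server=server1");
            Object abandoned = mbeanServer.getAttribute(metrics, "AbandonedTransactionCount");
            System.out.println("Abandoned transactions: " + abandoned);
        }
    }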

    Supports Spanner NEW_ROW_AND_OLD_VALUES value capture type

    Google Spanner’s value capture type controls how the change stream represents change data in the event stream and is configured when the change stream is constructed.

    Spanner introduced a new value capture mode called NEW_ROW_AND_OLD_VALUES, which is responsible for capturing all values of tracked columns, both modified and unmodified, whenever any column changes. This new mode is an improvement over NEW_ROW because it also includes the capture of old values, making it align with what you typically observe with other Debezium connectors.

    Other changes

    Altogether, 25 issues were fixed in this release:

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Notifications are Missing the ID field in log channel DBZ-7249

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (sub-query with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • Enhance Oracle’s CREATE TABLE for Multiple Table Specifications DBZ-7286

    • Add service loader manifests for all Connect plugins DBZ-7298

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • Update Groovy version to 4.x DBZ-7340

    • errors.max.retries is not used to stop retrying DBZ-7342

    • Upgrade Antora to 3.1.7 DBZ-7344

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • Upgrade Outbox Extension to Quarkus 3.6.5 DBZ-7352

    • MySqlJdbcSinkDataTypeConverterIT#testBooleanDataTypeMapping fails DBZ-7355

    Outlook & What’s next?

    The Debezium 2.6 release cycle is one of our most ambitious initiatives with lots of new features and changes. You can find more about what the team is working on specifically for 2.6 and the road to Debezium 3.0 in our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    As the team continues springing into action with Debezium 2.6, we also intend to continue fixing bugs and addressing any regressions reported against last quarter’s Debezium 2.5 release. Debezium 2.5 is now the project’s stable release, and we encourage everyone to upgrade to get the latest and greatest features. In fact, you can expect the next maintenance release, Debezium 2.5.1.Final, to be released later this week :).

    Until next time, happy streaming!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.6.0.Alpha1 Released

    \ No newline at end of file diff --git a/blog/2024/01/30/debezium-2-5-1-final-released/index.html b/blog/2024/01/30/debezium-2-5-1-final-released/index.html index a0a693067b..c08524e75c 100644 --- a/blog/2024/01/30/debezium-2-5-1-final-released/index.html +++ b/blog/2024/01/30/debezium-2-5-1-final-released/index.html @@ -1 +1 @@ - Debezium 2.5.1.Final Released

    While we remain on track with the upcoming Debezium 2.6 release, we continuously look at improvements and fixes to ensure that older releases keep providing the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Re-select Columns Post Processor
    • The re-select columns post processor previously built its re-selection query using a key derived from message.key.columns, which is not correct for most tables with primary keys. The default behavior has changed so that the table’s primary key is now used. A new configuration option, reselect.use.event.key (DBZ-7358), lets you choose between the primary key and the generated event key.

    Improvements and changes

    MongoDB post-image changes

    The MongoDB connector’s event payload can be configured to include the full document that was changed in an update. The connector previously made an opinionated choice about how the full document would be fetched as part of the change stream; however, this behavior was not consistent with our expectations in all use cases.

    Debezium 2.5+ introduces a new configuration option, capture.mode.full.update.type, allowing the connector to explicitly control how the change stream’s full document lookup should be handled (DBZ-7299). The default value for this option is lookup, meaning that the database will make a separate look-up to fetch the full document. If you are working with MongoDB 6+, you can also elect to use post_image to rely on MongoDB change stream’s post-image support.
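
    As a hedged example, the sketch below shows the relevant part of a MongoDB connector configuration that opts into the post-image behavior; it assumes MongoDB 6+ with change stream post-images enabled for the captured collections, and the capture.mode value shown is the pre-existing full-update mode.

    // Sketch only: option names come from this post (DBZ-7299); values are illustrative.
    java.util.Properties config = new java.util.Properties();
    config.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
    config.setProperty("capture.mode", "change_streams_update_full");
    // Use the change stream's post-image instead of a separate lookup query:
    config.setProperty("capture.mode.full.update.type", "post_image");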

    Other changes

    Altogether, 21 issues were fixed in this release:

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (sub-query with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • Debezium fails after table split operation DBZ-7360

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Replace additional role binding definition in kubernetes.yml with @RBACRule DBZ-7381

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    A big thank you to all the contributors from the community who worked on this release: Anisha Mohanty, Breno Moreira, Chris Cranford, Ilyas Ahsan, Jakub Cechacek, James Johnston, Jiri Pechanec, Mario Fiore Vitale, Mickael Maison, Peter Hamer, Robert Roldan, V K, and leoloel!

    Outlook & What’s next?

    The team remains dedicated to Debezium 2.6 over the next few months. In that time, you can expect that we’ll continue to review regressions and bug fixes, and provide maintenance releases for Debezium 2.5.

    In the meantime, if you’re interested in what the team has in store for Debezium 2.6 and beyond throughout 2024, we encourage you to review our road map, which includes details about our path to Debezium 3.0 later this year. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    We have two upcoming talks later this week at FOSDEM 2024 in Brussels. Ondrej Babec will be speaking about performance testing and why it’s important, and Vojtech Juranek will show how to use Debezium to feed AI models in real time. If you plan to attend, be sure to check out these talks and meet and greet with our Debezium engineers.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.5.1.Final Released

    \ No newline at end of file diff --git a/blog/2024/02/13/debezium-2.6-alpha2-released/index.html b/blog/2024/02/13/debezium-2.6-alpha2-released/index.html index de3385819d..c78f4ea5d2 100644 --- a/blog/2024/02/13/debezium-2.6-alpha2-released/index.html +++ b/blog/2024/02/13/debezium-2.6-alpha2-released/index.html @@ -1,4 +1,4 @@ Debezium 2.6.0.Alpha2 Released

    As we’ve hit the midpoint of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Vitess
    • The task configuration format used by previous versions of the connector could destabilize the Kafka Connect cluster. To resolve the problem, Debezium 2.6 introduces a new configuration format that is incompatible with the previous one (DBZ-7250). When upgrading, you may see a NullPointerException and an error indicating that the connector was unable to instantiate a task because it contains an invalid task configuration.

      If you experience this problem, delete and re-create the connector using the same name and configuration as before. Because the name is unchanged, the connector will start and re-use the last stored offsets, but it will not re-use the old task configurations, avoiding the start-up failure.

    Improvements and changes

    Java 17 now compile-time requirement

    Debezium 3.0, which will debut later this fall, will once again shift the Java baseline requirement for using Debezium, this time from Java 11 to Java 17. In preparation for Debezium 3 later this year, Debezium 2.6 and 2.7 move to a compile-time baseline of Java 17 (DBZ-7387).

    If you are a Debezium user, and you consume Debezium connectors, this will require no action on your part. You can continue to use Java 11 for now without issue, understanding that Debezium 3 will require Java 17 later this year.

    If you are developing Debezium connectors, Java 17 is now the baseline for compiling the Debezium source. If you have already been using Java 17, no action is needed on your part. If you were previously using Java 11, you will need to move to Java 17 in order to compile from source.

    If you are using the Debezium Quarkus Outbox Extension (not the Outbox SMT), note that because Quarkus 3.7+ moves to Java 17 as its baseline, the extension now requires Java 17 for both runtime and compile time.

    We expect this transition to be largely seamless, as it should have no impact on the runtime of Debezium’s connectors or Debezium Server at this time.

    Asynchronous Embedded Engine

    If you’re hearing about the Embedded Engine for the first time: Debezium ships with three ways to run its connectors. The most common is to deploy Debezium on Kafka Connect, while the second most common is Debezium Server, a ready-made runtime for Debezium connectors. The third option is the Embedded Engine. It is what Debezium uses internally for its test suite, it is the foundation of Debezium Server, and it is meant to provide a way to embed Debezium connectors inside your own application. The Embedded Engine is used by a variety of external contributors and frameworks, most notably Apache Flink, which relies heavily on it for its Debezium-based CDC connectors.

    One of the biggest new features of Debezium 2.6 is the asynchronous embedded engine, which debuts in this alpha release. This new asynchronous engine is the foundation on which Debezium Server and the future of embedding Debezium are based. The change focuses on several key goals and initiatives:

    • Run multiple source tasks for a given connector, if the connector supports multiple tasks

    • Run time-consuming code (transformations or serialization) in dedicated threads

    • Allow additional performance gains by optionally relaxing strict event dispatch ordering

    • Pave the way for future technology benefits such as virtual threads and delegating work to external workers

    • Better integration with Debezium Operator for Kubernetes and Debezium UI

    • Seamlessly integrate with Quarkus for Debezium Server

    What this new asynchronous model does not include or focus on are the following:

    • Implement parallelization inside a connector’s main capture loop.

    • Remove any dependency from Kafka Connect

    • Add support for multiple source connectors per Engine deployment

    • Add support for sink connectors

    Even if a connector is single-threaded and does not support multiple tasks, a connector deployment using the Embedded Engine or Debezium Server can take advantage of the new asynchronous model. A large portion of the time spent during event dispatch goes to the transformation and serialization phases, so running these stages on the new dedicated worker threads improves throughput.

    For developers who want to get started with the new asynchronous embedded engine, the debezium-embedded artifact now includes a new package, io.debezium.embedded.async, which contains all the pertinent components for using this new implementation. The asynchronous engine can be constructed in a similar way to the serial version by using the builder pattern, as shown below.

    final DebeziumEngine engine = new AsyncEngine.AsyncEngineBuilder()
         .using(properties)
         .notifying(this::changeConsumerHandler)
         .build();
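
    To round out the example, a hedged continuation is sketched below: DebeziumEngine is both Runnable and Closeable, so the built engine can be handed to an executor and shut down when the application stops. The executor handling shown here is our own convention, not something prescribed by the engine.

    // Sketch only: continues the snippet above; error handling is omitted for brevity.
    java.util.concurrent.ExecutorService executor =
            java.util.concurrent.Executors.newSingleThreadExecutor();
    executor.execute(engine);          // the engine starts streaming change events
    // ... application work; events arrive in changeConsumerHandler ...
    engine.close();                    // may throw IOException; stops the engine
    executor.shutdown();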

    We encourage everyone to take a look at the new Asynchronous Embedded Engine model, let us know your thoughts and if you spot any bugs or problems. We will be updating the documentation in coming releases to highlight all the benefits and changes, including examples. Until then, you can find all the details in the design document, DDD-7.

    Timestamp converter improvements

    Debezium 2.4 introduced the new TimezoneConverter, allowing users to target a specific time zone and convert the outgoing payload time values to that targeted time zone. The original implementation was restricted to converting values within the before or after parts of the payload; however, thanks to an improvement made as part of DBZ-7022, the converter can now also be used to convert other time-based fields in the metadata, such as ts_ms in the source information block.

    This change helps to improve lag metric calculations in situations where the JVM running the connector is using a time zone that differs from the database and the calculation of the envelope ts_ms - source ts_ms results in a variance caused by the time zone. By using the TimezoneConverter to convert metadata fields, you can easily calculate the lag between those two fields without the time zone interfering.
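
    For reference, a minimal sketch of enabling the converter as a single message transform follows; the SMT class name and the converted.timezone option reflect the TimezoneConverter as we understand it to be documented, while the transform alias (tz) is just an example.

    // Sketch only: converts outgoing temporal values to the given time zone; with the
    // DBZ-7022 improvement, metadata fields such as the source ts_ms can be covered as
    // well (see the converter's include/exclude options for the exact field selectors).
    java.util.Properties config = new java.util.Properties();
    config.setProperty("transforms", "tz");
    config.setProperty("transforms.tz.type", "io.debezium.transforms.TimezoneConverter");
    config.setProperty("transforms.tz.converted.timezone", "Europe/Prague");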

    SQL Server query improvements

    The Debezium SQL Server connector utilizes a common SQL Server stored procedure called fn_cdc_get_all_changes…​ to fetch all the relevant captured changes for a given table. This query performs several unions and only ever returns data from one of the union sub-queries, which can be inefficient.

    Debezium 2.6 for SQL Server introduces a new configuration property, data.query.mode, that can be used to influence which method the connector uses to gather the details about table changes (DBZ-7273). The default remains unchanged from older releases: the value function delegates to the aforementioned stored procedure. A new option, direct, can be used instead to build the query directly within the connector and gather the changes more efficiently.
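
    A minimal sketch of opting into the new mode is shown below; the property name and its values come from this post, and the connector class is the standard SQL Server connector.

    // Sketch only: switches change retrieval from the stored-procedure path ("function",
    // the default) to the more efficient direct query mode (DBZ-7273).
    java.util.Properties config = new java.util.Properties();
    config.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
    config.setProperty("data.query.mode", "direct");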

    Scoped Key/Trust-store support with MongoDB

    Debezium supports secure connections; however, MongoDB previously required that the key/trust-store configurations be supplied as JVM process arguments, which is less than ideal for environments like the cloud. As a first step toward aligning how secure connection configuration is specified across our connectors, Debezium 2.6 for MongoDB now supports specifying scoped key/trust-store configurations in the connector configuration (DBZ-7379).

    The MongoDB connector now includes the following new configuration properties (a configuration sketch follows the list):

    mongodb.ssl.keystore

    Specifies the path to the SSL keystore file.

    mongodb.ssl.keystore.password

    Specifies the credentials to open and access the SSL keystore provided by mongodb.ssl.keystore.

    mongodb.ssl.keystore.type

    Specifies the SSL keystore file type, defaults to PKCS12.

    mongodb.ssl.truststore

    Specifies the path to the SSL truststore file.

    mongodb.ssl.truststore.password

    Specifies the credentials to open and access the SSL truststore provided by mongodb.ssl.truststore.

    mongodb.ssl.truststore.type

    Specifies the SSL truststore file type, defaults to PKCS12.
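
    A hedged sketch pulling these options together follows; paths, passwords, and store types are placeholders, and mongodb.ssl.enabled is the pre-existing switch that turns TLS on in the first place.

    // Sketch only: property names come from the list above; values are placeholders.
    java.util.Properties config = new java.util.Properties();
    config.setProperty("mongodb.ssl.enabled", "true");
    config.setProperty("mongodb.ssl.keystore", "/etc/debezium/mongodb-keystore.p12");
    config.setProperty("mongodb.ssl.keystore.password", "keystore-secret");
    config.setProperty("mongodb.ssl.keystore.type", "PKCS12");
    config.setProperty("mongodb.ssl.truststore", "/etc/debezium/mongodb-truststore.p12");
    config.setProperty("mongodb.ssl.truststore.password", "truststore-secret");
    config.setProperty("mongodb.ssl.truststore.type", "PKCS12");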

    Source transaction id changes

    All Debezium change events contain a special metadata block called the source information block. This part of the event payload is responsible for providing metadata about the change event, including the unique identifier of the change, the time the change happened, the database and table the change is in reference to, as well as transaction metadata about the transaction that the change participated in.

    In Debezium 2.6, the transaction_id field in the source information block will no longer be provided unless the field is populated with a value. This should present no issue for users as this field was only populated when the connector was configured with provide.transaction.metadata set to true (DBZ-7380).

    If you have tooling that expects the source information block’s transaction_id field to always be present even though it is optional, you will need to adjust that tooling, as the field will no longer be present unless it is populated.

    Google PubSub Ordering Key Support

    The Debezium Server Google PubSub sink adapter has received a small update in Debezium 2.6. If you are streaming changes that have foreign key relationships, you may have wondered whether it was possible to specify an ordering key so that foreign key constraints could be maintained.

    Debezium 2.6 introduces a new configurable property for the Google PubSub sink adapter, ordering.key, which allows the sink adapter to use an externally provided ordering key from the connector configuration for the events rather than using the default behavior based on the event’s key (DBZ-7435).

    MongoDB UUID key support for Incremental snapshots

    As a small improvement to the Incremental Snapshot process for the Debezium for MongoDB connector, Debezium 2.6 adds support for the UUID data type, allowing this data type to be used within the Incremental Snapshot process like other data types (DBZ-7451).

    MongoDB post-image changes

    The MongoDB connector’s event payload can be configured to include the full document that was changed in an update. The connector previously made an opinionated choice about how the full document would be fetched as part of the change stream; however, this behavior was not consistent with our expectations in all use cases.

    Debezium 2.6 introduces a new configuration option, capture.mode.full.update.type, allowing the connector to explicitly control how the change stream’s full document lookup should be handled (DBZ-7299). The default value for this option is lookup, meaning that the database will make a separate look-up to fetch the full document. If you are working with MongoDB 6+, you can also elect to use post_image to rely on MongoDB change stream’s post-image support.

    Other changes

    Altogether, 66 issues were fixed in this release:

    • Add Number of records captured and processed as metrics for Debezium MongoDB Connector DBZ-6432

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Error when fail converting value with internal schema DBZ-7143

    • Remove obsolete MySQL version from TF DBZ-7173

    • Correctly handle METADATA records DBZ-7176

    • Move Snapshotter interface to core module as SPI DBZ-7300

    • Implement Snapshotter SPI MySQL/MariaDB DBZ-7301

    • Update the Debezium UI repo with local development infra and readme file. DBZ-7353

    • Debezium fails after table split operation DBZ-7360

    • Update QOSDK to the latest version DBZ-7361

    • Support DECFLOAT in Db2 connector DBZ-7362

    • Create PubSub example for DS deployed via operator DBZ-7370

    • Upstream artefact server image preparation job failing DBZ-7371

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • Tests in RHEL system testsuite fail to initialize Kafka containers DBZ-7373

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Fix logging for schema only recovery mode in mysql connector DBZ-7376

    • Replace additional rolebinding definition in kubernetes.yml with @RBACRule DBZ-7381

    • Records from snapshot delivered out of order DBZ-7382

    • Upgrade json-path to 2.9.0 DBZ-7383

    • Fix mysql version in mysql-replication container images DBZ-7384

    • Reduce size of docker image for Debezium 2.6 and up DBZ-7385

    • Remove the use of Lombok in Debezium testsuite DBZ-7386

    • Upgrade Outbox Extension to Quarkus 3.7.0 DBZ-7388

    • Add dependancy update bot to the UI Repo DBZ-7392

    • Duplicate Debezium SMT transform DBZ-7416

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Fix the unit test cases DBZ-7423

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Stopwatch throw NPE when toString is called without having statistics DBZ-7436

    • ReselectColumnsPostProcessor filter not use exclude predicate DBZ-7437

    • Adopt Oracle 23 to Testing Farm DBZ-7439

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • LogMiner batch size does not increase automatically DBZ-7445

    • Reduce string creation during SQL_REDO column read DBZ-7446

    • Evaluate container image size for Debezium UI served by nginx DBZ-7447

    • Upgrade Quarkus for Debezium Server to 3.2.9.Final DBZ-7449

    • Fix TimescaleDbDatabaseTest to run into test container DBZ-7452

    • Consolidate version management DBZ-7455

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • Upgrade example-mongo image version to 6.0 DBZ-7457

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • SQL Server queries with special characters fail after applying DBZ-7273 DBZ-7463

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    • Postgres images require clang-11 DBZ-7475

    • Make readiness and liveness proble timouts configurable DBZ-7476

    • Snapshotter SPI wrongly loaded on Debezium Server DBZ-7481

    Outlook & What’s next?

    We’ve reached the mid-way point of the quarter’s development cycle for 2.6, and the team is beginning its transition to the latter half, where our focus is more on stability, regressions, and bug fixes. There are still a number of new features and improvements on the horizon, so you can expect those in the coming two weeks when the first beta preview release of Debezium 2.6 will be published.

    As always, if you have any questions or are interested in what the roadmap holds for not only 2.6 but also the road to the new Debezium 3.0 later this fall, we encourage you to take a look at our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + .build();

    \ No newline at end of file diff --git a/blog/2024/02/27/debezium-2-5-2-final-released/index.html b/blog/2024/02/27/debezium-2-5-2-final-released/index.html index d8d6305470..e0206eb409 100644 --- a/blog/2024/02/27/debezium-2-5-2-final-released/index.html +++ b/blog/2024/02/27/debezium-2-5-2-final-released/index.html @@ -1 +1 @@ - Debezium 2.5.2.Final Released

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    Improvements and changes

    Scoped Key/Trust-store support with MongoDB

    Debezium supports secure connections; however, MongoDB previously required that the key/trust-store configurations be supplied as JVM process arguments, which is less than ideal for cloud environments. This feature was first introduced as part of Debezium 2.6, and we’ve opted to back-port it to Debezium 2.5. The change allows per-connector scoped key/trust-store configurations as part of the connector configuration (DBZ-7379).

    The MongoDB connector now includes the following new configuration properties:

    mongodb.ssl.keystore

    Specifies the path to the SSL keystore file.

    mongodb.ssl.keystore.password

    Specifies the credentials to open and access the SSL keystore provided by mongodb.ssl.keystore.

    mongodb.ssl.keystore.type

    Specifies the SSL keystore file type, defaults to PKCS12.

    mongodb.ssl.truststore

    Specifies the path to the SSL truststore file.

    mongodb.ssl.truststore.password

    Specifies the credentials to open and access the SSL truststore provided by mongodb.ssl.truststore.

    mongodb.ssl.truststore.type

    Specifies the SSL truststore file type, defaults to PKCS12.
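
    To illustrate how these options fit together, here is a minimal configuration sketch in Kafka Connect properties form. Only the mongodb.ssl.* entries are the new options described above; the connector name, connection string, topic prefix, paths, and passwords are hypothetical placeholders, and mongodb.ssl.enabled is the pre-existing switch for TLS connections.

        # Hypothetical MongoDB connector configuration (Kafka Connect properties form)
        name=inventory-mongodb
        connector.class=io.debezium.connector.mongodb.MongoDbConnector
        mongodb.connection.string=mongodb://mongodb.example.com:27017/?replicaSet=rs0
        topic.prefix=inventory
        mongodb.ssl.enabled=true
        # New in this release: key/trust-store settings scoped to this connector
        mongodb.ssl.keystore=/path/to/keystore.p12
        mongodb.ssl.keystore.password=keystore-secret
        mongodb.ssl.keystore.type=PKCS12
        mongodb.ssl.truststore=/path/to/truststore.p12
        mongodb.ssl.truststore.password=truststore-secret
        mongodb.ssl.truststore.type=PKCS12

    With per-connector settings like these, the stores no longer need to be supplied as JVM-wide arguments to the Kafka Connect worker process.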

    Other changes

    Altogether, 30 issues were fixed in this release:

    • PostgreSQL connector doesn’t restart properly if database if not reachable DBZ-6236

    • Correctly handle METADATA records DBZ-7176

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Stopwatch throw NPE when toString is called without having statistics DBZ-7436

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • LogMiner batch size does not increase automatically DBZ-7445

    • Consolidate version management DBZ-7455

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Document toggling MariaDB mode DBZ-7487

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    A big thank you to all the contributors from the community who worked on this release: Andrey Pustovetov, Chris Cranford, Clifford Cheefoon, Jakub Cechacek, Jiri Pechanec, Lars M. Johansson, Lourens Naudé, Mario Fiore Vitale, Ondrej Babec, Robert Roldan, and Stavros Champilomatis!

    Outlook & What’s next?

    We are just over a month away from the next minor installment, Debezium 2.6. This next release is packed with tons of new features and changes that we are excited to share. We encourage you to take a moment, grab the pre-releases available here, and share your feedback.

    In a few short weeks, the team will begin to shift focus to what lies ahead with Debezium 2.7. This will be the last minor release of the Debezium 2.x release stream, which will include read-only incremental snapshots for other connectors, a new dedicated MariaDB source connector, and a MongoDB sink connector, just to name a few highlights. We will also be carrying over any work from 2.6 that we’re still working on but didn’t make it into that release.

    And in closing, the last half of the year will focus on Debezium 3, where we’ll be moving to Java 17, working on new time-series source connectors, and more. Please take a moment and review our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.5.2.Final Released

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    Improvements and changes

    Scoped Key/Trust-store support with MongoDB

    Debezium supports secure connections; however, MongoDB requires that the key/trust-store configurations be supplied as JVM process arguments, which is less than ideal for cloud environments. This feature was first introduced as part of Debezium 2.6, and we’ve opted to back-port it to Debezium 2.5. This change allows per-connector scoped key/trust-store configurations as part of the connector configuration (DBZ-7379).

    The MongoDB connector now includes the following new configuration properties:

    mongodb.ssl.keystore

    Specifies the path to the SSL keystore file.

    mongodb.ssl.keystore.password

    Specifies the credentials to open and access the SSL keystore provided by mongodb.ssl.keystore.

    mongodb.ssl.keystore.type

    Specifies the SSL keystore file type, defaults to PKCS12.

    mongodb.ssl.truststore

    Specifies the path to the SSL truststore file.

    mongodb.ssl.truststore.password

    Specifies the credentials to open and access the SSL truststore provided by mongodb.ssl.truststore.

    mongodb.ssl.truststore.type

    Specifies the SSL truststore file type, defaults to PKCS12.

    Other changes

    Altogether, 30 issues were fixed in this release:

    • PostgreSQL connector doesn’t restart properly if database if not reachable DBZ-6236

    • Correctly handle METADATA records DBZ-7176

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Stopwatch throw NPE when toString is called without having statistics DBZ-7436

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • LogMiner batch size does not increase automatically DBZ-7445

    • Consolidate version management DBZ-7455

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Document toggling MariaDB mode DBZ-7487

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    A big thank you to all the contributors from the community who worked on this release: Andrey Pustovetov, Chris Cranford, Clifford Cheefoon, Jakub Cechacek, Jiri Pechanec, Lars M. Johansson, Lourens Naudé, Mario Fiore Vitale, Ondrej Babec, Robert Roldan, and Stavros Champilomatis!

    Outlook & What’s next?

    We are just over a month away from the next minor installment, Debezium 2.6. This next release is packed with tons of new features and changes that we are excited to share. We encourage you to take a moment, grab the pre-releases available here, and share your feedback.

    In a few short weeks, the team will begin to shift focus to what lies ahead with Debezium 2.7. This will be the last minor release of the Debezium 2.x release stream, which will include read-only incremental snapshots for other connectors, a new dedicated MariaDB source connector, and a MongoDB sink connector, just to name a few highlights. We will also be carrying over any work from 2.6 that we’re still working on but didn’t make it into that release.

    And in closing, the last half of the year will focus on Debezium 3, where we’ll be moving to Java 17, working on new time-series source connectors, and more. Please take a moment and review our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/03/06/debezium-2-6-beta1-released/index.html b/blog/2024/03/06/debezium-2-6-beta1-released/index.html index 2d9d36b2d6..d7b9d025de 100644 --- a/blog/2024/03/06/debezium-2-6-beta1-released/index.html +++ b/blog/2024/03/06/debezium-2-6-beta1-released/index.html @@ -33,4 +33,4 @@ "id": "571:53195832" ... } -}

    New Arbitrary-based payload formats

    While it’s common for users to utilize serialization based on JSON, Avro, Protobuf, or CloudEvents, there may be reasons to use a simpler format. Thanks to a community contribution as part of DBZ-7512, Debezium can be configured to use two new formats called simplestring and binary.

    The simplestring and binary formats are configured in Debezium Server using the debezium.format configuration options. For simplestring, the payload will be serialized into the topic as a single STRING data type. For binary, the payload will be serialized as BYTES using a byte[] (byte array).
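
    As a rough sketch, this could look as follows in a Debezium Server application.properties file, assuming the new formats are selected through the same debezium.format.value key used by the existing json and avro formats (please verify the exact key against the Debezium Server documentation for your version):

        # Debezium Server application.properties fragment (source and sink settings omitted)
        # Emit each change event payload as a single plain string:
        debezium.format.value=simplestring
        # ...or emit the payload as raw bytes instead:
        # debezium.format.value=binary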

    Oracle LogMiner Hybrid Mining Strategy

    Debezium 2.6 also introduces a new Oracle LogMiner mining strategy called hybrid, which can be enabled by setting the configuration property log.mining.strategy to the value hybrid. This new strategy is designed to support all schema evolution features of the default mining strategy while taking advantage of all the performance optimizations from the online catalog strategy.

    The main problem with the online_catalog strategy is that if a mining step observes a schema change and a data change in the same mining step, LogMiner is incapable of reconstructing the SQL correctly, which will result in the table name being OBJ# xxxxxx or the columns represented as COL1, COL2, and so on. To avoid this while using the online catalog strategy, users are recommended to perform schema changes in a lock-step pattern to avoid a mining step that observes both a schema change and a data change together; however, this is not always feasible.

    The new hybrid strategy works by tracking a table’s object id at the database level and then using this identifier to look up the schema associated with the table from Debezium’s relational table model. In short, this allows Debezium to do what Oracle LogMiner is unable to do in these specific corner cases. The table name will be taken from the relational model’s table name and columns will be mapped by column position.

    Unfortunately, Oracle does not provide a way to reconstruct failed SQL operations for CLOB, BLOB, and XML data types. This means that the new hybrid strategy cannot be configured with configurations that use lob.enabled set to true. If a connector is started using the hybrid strategy and has lob.enabled set to true, the connector will fail to start and report a configuration failure.
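
    For reference, a minimal Oracle connector configuration sketch enabling the new strategy might look like the following (connection settings and other required properties are omitted; lob.enabled is shown only to highlight the constraint described above, and false is already its default):

        # Oracle connector configuration fragment (Kafka Connect properties form)
        connector.class=io.debezium.connector.oracle.OracleConnector
        # Use the new hybrid mining strategy introduced in Debezium 2.6
        log.mining.strategy=hybrid
        # The hybrid strategy cannot be combined with LOB support;
        # starting the connector with lob.enabled=true fails validation.
        lob.enabled=false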

    Other changes

    Altogether, 86 issues were fixed in this release:

    • MySQL config values validated twice DBZ-2015

    • PostgreSQL connector doesn’t restart properly if database if not reachable DBZ-6236

    • NullPointerException in MongoDB connector DBZ-6434

    • Tests in RHEL system testsuite throw errors without ocp cluster DBZ-7002

    • Move timeout configuration of MongoDbReplicaSet into Builder class DBZ-7054

    • Several Oracle tests fail regularly on Testing Farm infrastructure DBZ-7072

    • Remove obsolete MySQL version from TF DBZ-7173

    • Add Oracle 23 to CI test matrix DBZ-7195

    • Refactor sharded mongo ocp test DBZ-7221

    • Implement Snapshotter SPI Oracle DBZ-7302

    • Align snapshot modes for SQLServer DBZ-7303

    • Update snapshot mode documentation DBZ-7309

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Upgrade ojdbc8 to 21.11.0.0 DBZ-7365

    • Document relation between column type and serializers for outbox DBZ-7368

    • Callout annotations rendered multiple times in downstream User Guide DBZ-7418

    • Test testEmptyChangesProducesHeartbeat tends to fail randomly DBZ-7453

    • Align snapshot modes for PostgreSQL, MySQL, Oracle DBZ-7461

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Allow special characters in signal table name DBZ-7480

    • Document toggling MariaDB mode DBZ-7487

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Add informix to main repository CI workflow DBZ-7490

    • Db2ReselectColumnsProcessorIT does not clean-up after test failures DBZ-7491

    • Disable Oracle Integration Tests on GitHub DBZ-7494

    • Unify and adjust thread time outs DBZ-7495

    • Completion callback called before connector stop DBZ-7496

    • Add "IF [NOT] EXISTS" DDL support for Oracle 23 DBZ-7498

    • Deployment examples show attribute name instead of its value DBZ-7499

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Remove incubating from Debezium documentation DBZ-7501

    • Add ability to parse Map<String, Object> into ConfigProperties DBZ-7503

    • LogMinerHelperIT test shouldAddCorrectLogFiles randomly fails DBZ-7504

    • Support Oracle 23 SELECT without FROM DBZ-7505

    • Add Oracle 23 Annotation support for CREATE/ALTER TABLE statements DBZ-7506

    • TestContainers MongoDbReplicaSetAuthTest randomly fails DBZ-7507

    • MySQL ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal fails randomly DBZ-7508

    • Add Informix to Java Outreach DBZ-7510

    • Disable parallel record processing in DBZ server tests against Apicurio DBZ-7515

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Remove the unused 'connector' parameter in the createSourceTask method in EmbeddedEngine.java DBZ-7517

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Allow to download containers also from Docker Hub DBZ-7524

    • Update rocketmq version DBZ-7525

    • signalLogWithEscapedCharacter fails with pgoutput-decoder DBZ-7526

    • Move RocketMQ dependency to debezium server DBZ-7527

    • Rework shouldGenerateSnapshotAndContinueStreaming assertions to deal with parallelization DBZ-7530

    • Multi-threaded snapshot can enqueue changes out of order DBZ-7534

    • AsyncEmbeddedEngineTest#testTasksAreStoppedIfSomeFailsToStart fails randomly DBZ-7535

    • MongoDbReplicaSetAuthTest fails randomly DBZ-7537

    • SQLServer tests taking long time due to database bad state DBZ-7541

    • Explicitly import jakarta dependencies that are excluded via glassfish filter DBZ-7545

    • ReadOnlyIncrementalSnapshotIT#testStopSnapshotKafkaSignal fails randomly DBZ-7553

    • Include RocketMQ and Redis container output into test log DBZ-7557

    • Allow XStream error ORA-23656 to be retried DBZ-7559

    • Numeric default value decimal scale mismatch DBZ-7562

    • Wait for Redis server to start DBZ-7564

    • Documentation conflict DBZ-7565

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    • AsyncEmbeddedEngineTest.testExecuteSmt fails randomly DBZ-7568

    • Debezium fails to compile with JDK 21 DBZ-7569

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Upgrade Kafka to 3.7.0 DBZ-7574

    • Redis tests fail randomly with JedisConnectionException: Unexpected end of stream DBZ-7576

    • RedisOffsetIT.testRedisConnectionRetry fails randomly DBZ-7578

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Improved logging in case of PostgreSQL failure DBZ-7581

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Reduce debug logs on tests DBZ-7588

    • Server SQS sink doesn’t support quick profile DBZ-7590

    • Oracle Connector REST Extension Tests Fail DBZ-7597

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    Outlook & What’s next?

    The next few weeks will be focused primarily on stability and bug fixes. We expect to release Debezium 2.6.0.Final in just under three weeks, so we encourage you to download and test the latest Beta and provide your feedback.

    If you have any questions or are interested in what the roadmap holds, not only for 2.6 but also for the road to the new Debezium 3.0 later this fall, we encourage you to take a look at our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    And in closing, our very own Mario Vitale will be speaking at Open Source Day 2024, where he will talk about Dealing with data consistency - a CDC approach to dual writes. Please be sure to check out his session on Day 1 as a part of the Beta track at 10:45am!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    New Arbitrary-based payload formats

    While it’s common for users to utilize serialization based on JSON, Avro, Protobuf, or CloudEvents, there may be reasons to use a simpler format. Thanks to a community contribution as part of DBZ-7512, Debezium can be configured to use two new formats called simplestring and binary.

    The simplestring and binary formats are configured in Debezium Server using the debezium.format configuration options. For simplestring, the payload will be serialized into the topic as a single STRING data type. For binary, the payload will be serialized as BYTES using a byte[] (byte array).

    Oracle LogMiner Hybrid Mining Strategy

    Debezium 2.6 also introduces a new Oracle LogMiner mining strategy called hybrid, which can be enabled by setting the configuration property log.mining.strategy to the value hybrid. This new strategy is designed to support all schema evolution features of the default mining strategy while taking advantage of all the performance optimizations from the online catalog strategy.

    The main problem with the online_catalog strategy is that if a mining step observes a schema change and a data change in the same mining step, LogMiner is incapable of reconstructing the SQL correctly, which will result in the table name being OBJ# xxxxxx or the columns represented as COL1, COL2, and so on. To avoid this while using the online catalog strategy, users are recommended to perform schema changes in a lock-step pattern to avoid a mining step that observes both a schema change and a data change together; however, this is not always feasible.

    The new hybrid strategy works by tracking a table’s object id at the database level and then using this identifier to look up the schema associated with the table from Debezium’s relational table model. In short, this allows Debezium to do what Oracle LogMiner is unable to do in these specific corner cases. The table name will be taken from the relational model’s table name and columns will be mapped by column position.

    Unfortunately, Oracle does not provide a way to reconstruct failed SQL operations for CLOB, BLOB, and XML data types. This means that the new hybrid strategy cannot be configured with configurations that use lob.enabled set to true. If a connector is started using the hybrid strategy and has lob.enabled set to true, the connector will fail to start and report a configuration failure.

    Other changes

    Altogether, 86 issues were fixed in this release:

    • MySQL config values validated twice DBZ-2015

    • PostgreSQL connector doesn’t restart properly if database if not reachable DBZ-6236

    • NullPointerException in MongoDB connector DBZ-6434

    • Tests in RHEL system testsuite throw errors without ocp cluster DBZ-7002

    • Move timeout configuration of MongoDbReplicaSet into Builder class DBZ-7054

    • Several Oracle tests fail regularly on Testing Farm infrastructure DBZ-7072

    • Remove obsolete MySQL version from TF DBZ-7173

    • Add Oracle 23 to CI test matrix DBZ-7195

    • Refactor sharded mongo ocp test DBZ-7221

    • Implement Snapshotter SPI Oracle DBZ-7302

    • Align snapshot modes for SQLServer DBZ-7303

    • Update snapshot mode documentation DBZ-7309

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Upgrade ojdbc8 to 21.11.0.0 DBZ-7365

    • Document relation between column type and serializers for outbox DBZ-7368

    • Callout annotations rendered multiple times in downstream User Guide DBZ-7418

    • Test testEmptyChangesProducesHeartbeat tends to fail randomly DBZ-7453

    • Align snapshot modes for PostgreSQL, MySQL, Oracle DBZ-7461

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Allow special characters in signal table name DBZ-7480

    • Document toggling MariaDB mode DBZ-7487

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Add informix to main repository CI workflow DBZ-7490

    • Db2ReselectColumnsProcessorIT does not clean-up after test failures DBZ-7491

    • Disable Oracle Integration Tests on GitHub DBZ-7494

    • Unify and adjust thread time outs DBZ-7495

    • Completion callback called before connector stop DBZ-7496

    • Add "IF [NOT] EXISTS" DDL support for Oracle 23 DBZ-7498

    • Deployment examples show attribute name instead of its value DBZ-7499

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Remove incubating from Debezium documentation DBZ-7501

    • Add ability to parse Map<String, Object> into ConfigProperties DBZ-7503

    • LogMinerHelperIT test shouldAddCorrectLogFiles randomly fails DBZ-7504

    • Support Oracle 23 SELECT without FROM DBZ-7505

    • Add Oracle 23 Annotation support for CREATE/ALTER TABLE statements DBZ-7506

    • TestContainers MongoDbReplicaSetAuthTest randomly fails DBZ-7507

    • MySQL ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal fails randomly DBZ-7508

    • Add Informix to Java Outreach DBZ-7510

    • Disable parallel record processing in DBZ server tests against Apicurio DBZ-7515

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Remove the unused 'connector' parameter in the createSourceTask method in EmbeddedEngine.java DBZ-7517

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Allow to download containers also from Docker Hub DBZ-7524

    • Update rocketmq version DBZ-7525

    • signalLogWithEscapedCharacter fails with pgoutput-decoder DBZ-7526

    • Move RocketMQ dependency to debezium server DBZ-7527

    • Rework shouldGenerateSnapshotAndContinueStreaming assertions to deal with parallelization DBZ-7530

    • Multi-threaded snapshot can enqueue changes out of order DBZ-7534

    • AsyncEmbeddedEngineTest#testTasksAreStoppedIfSomeFailsToStart fails randomly DBZ-7535

    • MongoDbReplicaSetAuthTest fails randomly DBZ-7537

    • SQLServer tests taking long time due to database bad state DBZ-7541

    • Explicitly import jakarta dependencies that are excluded via glassfish filter DBZ-7545

    • ReadOnlyIncrementalSnapshotIT#testStopSnapshotKafkaSignal fails randomly DBZ-7553

    • Include RocketMQ and Redis container output into test log DBZ-7557

    • Allow XStream error ORA-23656 to be retried DBZ-7559

    • Numeric default value decimal scale mismatch DBZ-7562

    • Wait for Redis server to start DBZ-7564

    • Documentation conflict DBZ-7565

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    • AsyncEmbeddedEngineTest.testExecuteSmt fails randomly DBZ-7568

    • Debezium fails to compile with JDK 21 DBZ-7569

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Upgrade Kafka to 3.7.0 DBZ-7574

    • Redis tests fail randomly with JedisConnectionException: Unexpected end of stream DBZ-7576

    • RedisOffsetIT.testRedisConnectionRetry fails randomly DBZ-7578

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Improved logging in case of PostgreSQL failure DBZ-7581

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Reduce debug logs on tests DBZ-7588

    • Server SQS sink doesn’t support quick profile DBZ-7590

    • Oracle Connector REST Extension Tests Fail DBZ-7597

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    Outlook & What’s next?

    The next few weeks will be focused primarily on stability and bug fixes. We expect to release Debezium 2.6.0.Final in just under three weeks, so we encourage you to download and test the latest Beta and provide your feedback.

    If you have any questions or are interested in what the roadmap holds, not only for 2.6 but also for the road to the new Debezium 3.0 later this fall, we encourage you to take a look at our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    And in closing, our very own Mario Vitale will be speaking at Open Source Day 2024, where he will talk about Dealing with data consistency - a CDC approach to dual writes. Please be sure to check out his session on Day 1 as a part of the Beta track at 10:45am!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/03/19/debezium-2-5-3-final-released/index.html b/blog/2024/03/19/debezium-2-5-3-final-released/index.html index 6cf4698253..2e5c556750 100644 --- a/blog/2024/03/19/debezium-2-5-3-final-released/index.html +++ b/blog/2024/03/19/debezium-2-5-3-final-released/index.html @@ -12,4 +12,4 @@ "id": "571:53195832" ... } -}

    PostgreSQL improvements

    Updated JDBC driver

    The PostgreSQL driver was upgraded to version 42.6.1.

    Improved logging

    During the start-up of the PostgreSQL connector, some users reported that the connector appeared stuck, or acting as though it could be caught in an infinite loop (DBZ-7581). The issue appeared to be that the call to pg_replication_slot_advance was taking longer than the configured database read timeout, and the exception that was thrown was not sufficient to identify the problem. The logging has been significantly improved to pinpoint this root cause so that users can make the necessary adjustments if they encounter this problem.

    Other changes

    Altogether, 25 issues were fixed in this release:

    • NullPointerException in MongoDB connector DBZ-6434

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    • Fix MySQL image fetch for tests DBZ-7651

    Outlook & What’s next?

    We are just a few weeks away from the final build of Debezium 2.6. I would anticipate a 2.6.0.CR1 build later in the week, with 2.6.0.Final sometime next week, barring any unforeseen issues.

    In a few short weeks, the team will begin to shift focus to what lies ahead with Debezium 2.7. This will be the last minor release of the Debezium 2.x release stream, which will include read-only incremental snapshots for other connectors, a new dedicated MariaDB source connector, and a MongoDB sink connector, just to name a few highlights. We will also be carrying over any work from 2.6 that we’re still working on but didn’t make it into that release.

    And in closing, the last half of the year will focus on Debezium 3, where we’ll be moving to Java 17, working on new time-series source connectors, and more. Please take a moment and review our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    PostgreSQL improvements

    Updated JDBC driver

    The PostgreSQL driver was upgraded to version 42.6.1.

    Improved logging

    During the start-up of the PostgreSQL connector, some users reported that the connector appeared stuck, or acting as though it could be caught in an infinite loop (DBZ-7581). The issue appeared to be that the call to pg_replication_slot_advance was taking longer than the configured database read timeout, and the exception that was thrown was not sufficient to identify the problem. The logging has been significantly improved to pinpoint this root cause so that users can make the necessary adjustments if they encounter this problem.

    Other changes

    Altogether, 25 issues were fixed in this release:

    • NullPointerException in MongoDB connector DBZ-6434

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    • Fix MySQL image fetch for tests DBZ-7651

    Outlook & What’s next?

    We are just a few weeks away from the final build of Debezium 2.6. I would anticipate a 2.6.0.CR1 build later in the week, with 2.6.0.Final sometime next week, barring any unforeseen issues.

    In a few short weeks, the team will begin to shift focus to what lies ahead with Debezium 2.7. This will be the last minor release of the Debezium 2.x release stream, which will include read-only incremental snapshots for other connectors, a new dedicated MariaDB source connector, and a MongoDB sink connector, just to name a few highlights. We will also be carrying over any work from 2.6 that we’re still working on but didn’t make it into that release.

    And in closing, the last half of the year will focus on Debezium 3, where we’ll be moving to Java 17, working on new time-series source connectors, and more. Please take a moment and review our road map. If you have any suggestions or ideas, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/03/25/debezium-2-6-cr1-released/index.html b/blog/2024/03/25/debezium-2-6-cr1-released/index.html index b33be456a1..f30fe5baea 100644 --- a/blog/2024/03/25/debezium-2-6-cr1-released/index.html +++ b/blog/2024/03/25/debezium-2-6-cr1-released/index.html @@ -1 +1 @@ - Debezium 2.6.0.CR1 Released

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    Breaking changes

    While we strive to avoid breaking changes, sometimes those changes are inevitable in order to evolve in the right direction. This release includes several breaking changes.

    MySQL

    The MySQL driver was updated to version 8.3.0, and this driver is not compatible with MySQL 5.x. If you still need to use an older MySQL version, please downgrade the driver after installation to a version that is compatible with your database (DBZ-7652).

    SQL Server

    The SQL Server connector was not capturing all schemas when the connector was first deployed, and instead, was only capturing the schemas based on the tables defined in the configuration’s include list. This was a bug that could prevent users from easily adding new tables to the connector when expecting that the new table’s schema would already exist in the schema history topic. The connector now correctly honors the store.only.captured.tables.ddl configuration option (DBZ-7593).

    For existing connector deployments, if you do not specifically set the store.only.captured.tables.ddl property for the schema history topic, the connector will begin capturing schema changes for all relevant tables in your database. If you want to prevent this and retain the prior behavior, you will need to adjust your connector configuration by adding schema.history.internal.store.only.captured.tables.ddl with a value of true.
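
    As a sketch, the relevant fragment of a SQL Server connector configuration that retains the previous behavior would look something like this (all other required connection settings are omitted):

        # SQL Server connector configuration fragment (Kafka Connect properties form)
        connector.class=io.debezium.connector.sqlserver.SqlServerConnector
        # Keep storing only the captured tables' DDL in the schema history topic,
        # i.e. the behavior prior to this fix (DBZ-7593):
        schema.history.internal.store.only.captured.tables.ddl=true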

    Vitess

    The Vitess connector previously used the timestamp of the BEGIN message as the source timestamp. This has been changed to use the COMMIT timestamp instead, matching the behavior of the other connectors (DBZ-7628).

    New features and improvements

    Debezium 2.6.0.CR1 also introduces more improvements and features; let’s take a look at each individually.

    XML Support for OpenLogReplicator

    The Debezium for Oracle connector supports connections with OpenLogReplicator, allowing Oracle users to stream changes directly from the transaction logs. The latest build of OpenLogReplicator, version 1.5.0, has added support for XML column types.

    To get started streaming XML with OpenLogReplicator, please upgrade the OpenLogReplicator process to 1.5.0 and restart the replicator process. Be aware that if you want to stream binary-based XML column data, you will need to enable this feature in the OpenLogReplicator configuration.

    TRACE level logging for Debezium Server

    Debezium Server is a ready-made runtime for Debezium source connectors that uses the Quarkus framework to manage the source and sink deployments. As most Debezium Server users who have reached out with questions or bugs are aware, we often ask for TRACE-level logs. Providing these has often proven difficult, because the minimum logging level is a build-time configuration in Quarkus, so obtaining them used to require a full rebuild of Debezium Server.

    With the Debezium 2.6.0.CR1 release and later, this will no longer be required. The build-time configuration has been adjusted by default to include TRACE logging levels, so moving forward users can simply set the log level to TRACE and restart Debezium Server to obtain the logs (DBZ-7369).
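
    For example, raising the log level is done with the standard Quarkus logging properties in Debezium Server’s application.properties; a minimal sketch could look like this (the category shown is Debezium’s top-level io.debezium package):

        # Debezium Server application.properties fragment
        # Raise everything to TRACE (very verbose):
        quarkus.log.level=TRACE
        # ...or scope it to Debezium's own loggers to keep the output manageable:
        # quarkus.log.category."io.debezium".level=TRACE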

    New Unified Snapshot Modes

    The snapshot process is an integral part of each connector’s lifecycle, and it’s responsible for collecting and sending all the historical data that exists in your data store to your target systems, if desired. For Debezium users who work with multiple connector types, we understand that having differing snapshot modes across connectors can sometimes be confusing to work with. So this change is designed to address that.

    For many of you who may have already tried or installed the Debezium 2.6 pre-releases, you’re already using the unified snapshot SPI, as it was initially designed to be a drop-in replacement requiring no changes. This release finishes that work for MongoDB and Db2.

    Of these changes, the most notable include the following:

    • All snapshot modes are available to all connectors, excluding never, which remains specific to MySQL. This means that connectors that may previously not have supported a given snapshot mode, such as when_needed, can now use that mode to retake a snapshot when the connector identifies that it’s necessary.

    • The schema_only_recovery mode has been deprecated and replaced by recovery.

    • The schema_only mode has also been deprecated and replaced by no_data.

    All deprecated modes will remain available until Debezium 3 later this year. This provides users with about six months to adjust scripts, configurations, and processes in advance.
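
    In practice, migrating is a one-line configuration change. As a sketch, for a connector that previously captured only schema changes during the snapshot phase (only the snapshot.mode fragment is shown):

        # Before (deprecated spelling, still accepted until Debezium 3):
        #   snapshot.mode=schema_only
        # After, using the unified snapshot modes:
        snapshot.mode=no_data
        # Similarly, schema_only_recovery is replaced by:
        #   snapshot.mode=recovery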

    Cassandra configurable partition modes

    When a Debezium Cassandra connector reads the commit logs, events are processed sequentially and added to a queue. If multiple queues exist, events are distributed across these queues based on the hash of the commit log filename. This could result in situations where events were emitted in non-chronological order.

    With Debezium 2.6, the Cassandra connector’s hashing algorithm now uses the partition column names to resolve the queue index for insertion. This should provide a more stable insert order so that events are emitted in the correct order.

    A new configuration option has been added to opt in to this new behavior. Debezium users can set the new configuration property event.order.guarantee.mode to partition_values to take advantage of this new mode. By default, the property retains the old behavior with a default value of commitlog_file.
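
    A minimal configuration fragment opting in to the new mode might look like the following sketch, shown in simple key/value form with all other Cassandra connector settings omitted:

        # Cassandra connector configuration fragment
        # Assign events to queues based on partition column values (new in 2.6):
        event.order.guarantee.mode=partition_values
        # Default, which preserves the previous behavior:
        # event.order.guarantee.mode=commitlog_file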

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Log Mining Processor advances SCN incorrectly if LogMiner query returns no rows DBZ-6679

    • debezium-connector-jdbc occurred java.sql.SQLException: ORA-01461: can bind a LONG value only DBZ-6900

    • Align snapshot modes for MongoDB DBZ-7304

    • Align snapshot modes for DB2 DBZ-7305

    • Align all snapshot mode on all connectors DBZ-7308

    • Oracle connector unable to find SCN after Exadata maintenance updates DBZ-7389

    • Oracle LOB requery on Primary Key change does not work for all column types DBZ-7458

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Remove LogMiner continuous mining configuration option DBZ-7610

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Enhanced event timestamp precision combined with ExtractNewRecordState not working DBZ-7615

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • JDBC connector does not process ByteBuffer field value DBZ-7620

    • Update Quarkus Outbox to Quarkus 3.8.2 DBZ-7623

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • MongoDbReplicaSet and MongoDbShardedCluster should not create a new network for each builder instance by default DBZ-7626

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Remove forgotten lombok code from system tests DBZ-7634

    • Numeric value without mantissa cannot be parsed DBZ-7643

    • Add JDBC connector to artifact server image preparation DBZ-7644

    • Revert removal of Oracle LogMiner continuous mining DBZ-7645

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Missing test annotation in PostgresConnectorIT DBZ-7649

    • Fix MySQL image fetch for tests DBZ-7651

    • RedisSchemaHistoryIT continually fails DBZ-7654

    • Upgrade Quarkus Outbox Extension to Quarkus 3.8.3 DBZ-7656

    • Bump SQL Server test image to SQL Server 2022 DBZ-7657

    • Upgrade Debezium Server to Quarkus 3.2.11.Final DBZ-7662

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • Exclude jcl-over-slf4j dependency DBZ-7665

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    • Duplicate envar generated in operator bundle DBZ-7703

    What’s next?

    We have just over a week before the Debezium 2.6 final release. If you have not given any of the latest pre-releases a try, we encourage everyone to do so and provide feedback if you face any problems or issues deploying this release candidate. Our goal is to release the 2.6.0.Final build immediately after the upcoming holiday break.

    As the team begins to shift gears toward Debezium 2.7, here’s a sneak peek at some upcoming highlights:

    • Standalone, Official MariaDB connector

    • User-friendly offset manipulation

    • Read-only incremental snapshots for all relational connectors

    • Sneak peek at the first PoC of the Debezium Server UI

    For more details, please check out our road map for all upcoming details around Debezium 2.7 and beyond.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. We’re interested to know what you’d like to see changed in Debezium 3, so let us know!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.6.0.CR1 Released

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    Breaking changes

    While we strive to avoid breaking changes, sometimes those changes are inevitable in order to evolve in the right direction. This release includes several breaking changes.

    MySQL

    The MySQL driver was updated to version 8.3.0, and this driver is not compatible with MySQL 5.x. If you still need to use an older MySQL version, please downgrade the driver after installation to a version that is compatible with your database (DBZ-7652).

    SQL Server

    The SQL Server connector was not capturing all schemas when the connector was first deployed, and instead, was only capturing the schemas based on the tables defined in the configuration’s include list. This was a bug that could prevent users from easily adding new tables to the connector when expecting that the new table’s schema would already exist in the schema history topic. The connector now correctly honors the store.only.captured.tables.ddl configuration option (DBZ-7593).

    For existing connector deployments, if you do not specifically set the store.only.captured.tables.ddl property for the schema history topic, the connector will begin capturing schema changes for all relevant tables in your database. If you want to prevent this and retain the prior behavior, you will need to adjust your connector configuration by adding schema.history.internal.store.only.captured.tables.ddl with a value of true.

    Vitess

    The Vitess connector previously used the timestamp of the BEGIN message as the source timestamp. This has been changed to use the COMMIT timestamp instead, matching the behavior of the other connectors (DBZ-7628).

    New features and improvements

    Debezium 2.6.0.CR1 also introduces more improvements and features; let’s take a look at each individually.

    XML Support for OpenLogReplicator

The Debezium for Oracle connector supports connections with OpenLogReplicator, allowing Oracle users to stream changes directly from the transaction logs. The latest build of OpenLogReplicator, version 1.5.0, adds support for XML column types.

To get started streaming XML with OpenLogReplicator, upgrade the OpenLogReplicator process to 1.5.0 and restart it. Be aware that if you want to stream binary-based XML column data, you will need to enable this feature in the OpenLogReplicator configuration.
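For orientation, a hedged sketch of the Oracle connector properties that select the OpenLogReplicator adapter; the source name, host, and port are placeholders, and the binary XML toggle itself lives in OpenLogReplicator's own configuration file rather than in the connector:

# Stream changes through OpenLogReplicator instead of LogMiner
database.connection.adapter=olr
openlogreplicator.source=ORACLE
openlogreplicator.host=olr.example.com
openlogreplicator.port=9000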

    TRACE level logging for Debezium Server

Debezium Server is a ready-made runtime for Debezium source connectors that uses the Quarkus framework to manage the source and sink deployments. As many Debezium Server users who have reached out with questions or bug reports know, we often ask for TRACE-level logs, and providing them has been difficult because the minimum logging level is a build-time configuration in Quarkus, which previously required a full rebuild of Debezium Server.

Starting with the Debezium 2.6.0.CR1 release, this is no longer required. The build-time configuration now includes the TRACE logging level by default, so users can simply set the log level to TRACE and restart Debezium Server to obtain the logs (DBZ-7369).
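For example, a minimal sketch of the runtime logging settings in Debezium Server's conf/application.properties (the category override is optional and only narrows the verbosity):

# Enable TRACE logging at runtime; no rebuild of Debezium Server required
quarkus.log.level=TRACE
# Optionally restrict the extra verbosity to Debezium's own loggers
quarkus.log.category."io.debezium".level=TRACE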

    New Unified Snapshot Modes

    The snapshot process is an integral part of each connector’s lifecycle, and it’s responsible for collecting and sending all the historical data that exists in your data store to your target systems, if desired. For Debezium users who work with multiple connector types, we understand that having differing snapshot modes across connectors can sometimes be confusing to work with. So this change is designed to address that.

If you have already tried or installed the Debezium 2.6 pre-releases, you are already using the unified snapshot SPI, as it was designed to be a drop-in replacement that initially required no changes. This release finishes that work for MongoDB and Db2.

    Of these changes, the most notable include the following:

• All snapshot modes are available to all connectors, excluding never, which remains specific to MySQL. This means that connectors that previously did not support a snapshot mode, such as when_needed, can now use this mode to retake a snapshot when the connector identifies that it’s necessary.

    • The schema_only_recovery mode has been deprecated and replaced by recovery.

    • The schema_only mode has also been deprecated and replaced by no_data.

    All deprecated modes will remain available until Debezium 3 later this year. This provides users with about six months to adjust scripts, configurations, and processes in advance.
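Migrating away from the deprecated modes is a one-line change in the connector configuration; a small sketch:

# formerly snapshot.mode=schema_only
snapshot.mode=no_data
# formerly snapshot.mode=schema_only_recovery
#snapshot.mode=recovery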

    Cassandra configurable partition modes

When the Debezium Cassandra connector reads the commit logs, events are processed sequentially and added to a queue. If multiple queues exist, events are distributed between the queues based on the hash of the commit log filename. This could result in events being emitted in non-chronological order.

    With Debezium 2.6, the Cassandra connector’s hashing algorithm now uses the partition column names to resolve the queue index for insertion. This should provide a more stable insert order so that events are emitted in the correct order.

A new configuration option has been added to opt in to this new behavior. Debezium users can set the new configuration property event.order.guarantee.mode to partition_values to take advantage of this new mode. By default, the property retains the old behavior with a default of commitlog_file.
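A minimal sketch of opting in to the new ordering behavior in the Cassandra connector configuration:

# Route events to queues by partition column values instead of the commit log filename
event.order.guarantee.mode=partition_values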

    Other fixes

    In addition, there were quite a number of stability and bug fixes that made it into this release. These include the following:

    • Log Mining Processor advances SCN incorrectly if LogMiner query returns no rows DBZ-6679

    • debezium-connector-jdbc occurred java.sql.SQLException: ORA-01461: can bind a LONG value only DBZ-6900

    • Align snapshot modes for MongoDB DBZ-7304

    • Align snapshot modes for DB2 DBZ-7305

    • Align all snapshot mode on all connectors DBZ-7308

    • Oracle connector unable to find SCN after Exadata maintenance updates DBZ-7389

    • Oracle LOB requery on Primary Key change does not work for all column types DBZ-7458

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Remove LogMiner continuous mining configuration option DBZ-7610

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Enhanced event timestamp precision combined with ExtractNewRecordState not working DBZ-7615

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • JDBC connector does not process ByteBuffer field value DBZ-7620

    • Update Quarkus Outbox to Quarkus 3.8.2 DBZ-7623

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • MongoDbReplicaSet and MongoDbShardedCluster should not create a new network for each builder instance by default DBZ-7626

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Remove forgotten lombok code from system tests DBZ-7634

    • Numerci value without mantissa cannot be parsed DBZ-7643

    • Add JDBC connector to artifact server image preparation DBZ-7644

    • Revert removal of Oracle LogMiner continuous mining DBZ-7645

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Missing test annotation in PostgresConnectorIT DBZ-7649

    • Fix MySQL image fetch for tests DBZ-7651

    • RedisSchemaHistoryIT continually fails DBZ-7654

    • Upgrade Quarkus Outbox Extension to Quarkus 3.8.3 DBZ-7656

    • Bump SQL Server test image to SQL Server 2022 DBZ-7657

    • Upgrade Debezium Server to Quarkus 3.2.11.Final DBZ-7662

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • Exclude jcl-over-slf4j dependency DBZ-7665

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    • Duplicate envar generated in operator bundle DBZ-7703

    What’s next?

    We have just over a week before the Debezium 2.6 final release. If you have not given any of the latest pre-releases a try, we encourage everyone to do so and provide feedback if you face any problems or issues deploying this release candidate. Our goal is to release the 2.6.0.Final build immediately after the upcoming holiday break.

As the team begins to shift gears toward Debezium 2.7, here’s a sneak peek at some upcoming highlights:

    • Standalone, Official MariaDB connector

    • User-friendly offset manipulation

    • Read-only incremental snapshots for all relational connectors

    • Sneak peek at the first PoC of the Debezium Server UI

    For more details, please check out our road map for all upcoming details around Debezium 2.7 and beyond.

    As always, please be sure to get in touch with us on the mailing list or Zulip chat if you have questions or feedback. We’re interested to know what you’d like to see changed in Debezium 3, so let us know!

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.6.0.Final Released

    Supports Spanner NEW_ROW_AND_OLD_VALUES value capture type

Google Spanner’s value capture type controls how the change stream represents the change data in the event stream, and it is configured when constructing the change stream.

    Spanner introduced a new value capture mode called NEW_ROW_AND_OLD_VALUES, which is responsible for capturing all values of tracked columns, both modified and unmodified, whenever any column changes. This new mode is an improvement over NEW_ROW because it also includes the capture of old values, making it align with what you typically observe with other Debezium connectors.
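For illustration, the value capture type is chosen when the change stream is created on the Spanner side; a hedged sketch of the DDL, with the stream and table names as placeholders:

CREATE CHANGE STREAM OrdersStream
  FOR Orders
  OPTIONS ( value_capture_type = 'NEW_ROW_AND_OLD_VALUES' );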

    New Arbitrary-based payload formats

While it’s common for users to rely on serialization based on JSON, Avro, Protobuf, or CloudEvents, there may be reasons to use a simpler format. Thanks to a community contribution as part of DBZ-7512, Debezium can be configured to use two new formats called simplestring and binary.

The simplestring and binary formats are configured in Debezium Server using the debezium.format configuration properties. For simplestring, the payload is serialized into the topic as a single STRING data type. For binary, the payload is serialized as BYTES using a byte[] (byte array).
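A hedged sketch of selecting one of the new formats in a Debezium Server application.properties, assuming the standard debezium.format.value property:

# Serialize change event values as a single string
debezium.format.value=simplestring
# ...or as raw bytes
#debezium.format.value=binary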

    TRACE level logging for Debezium Server

Debezium Server is a ready-made runtime for Debezium source connectors that uses the Quarkus framework to manage the source and sink deployments. As many Debezium Server users who have reached out with questions or bug reports know, we often ask for TRACE-level logs, and providing them has been difficult because the minimum logging level is a build-time configuration in Quarkus, which previously required a full rebuild of Debezium Server.

Starting with the Debezium 2.6.0.CR1 release, this is no longer required. The build-time configuration now includes the TRACE logging level by default, so users can simply set the log level to TRACE and restart Debezium Server to obtain the logs (DBZ-7369).

    Google PubSub Ordering Key Support

    The Debezium Server Google PubSub sink adapter has received a small update in Debezium 2.6. If you are streaming changes that have foreign key relationships, you may have wondered whether it was possible to specify an ordering key so that foreign key constraints could be maintained.

    Debezium 2.6 introduces a new configurable property for the Google PubSub sink adapter, ordering.key, which allows the sink adapter to use an externally provided ordering key from the connector configuration for the events rather than using the default behavior based on the event’s key (DBZ-7435).
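A hedged sketch of a Debezium Server sink configuration using the new property; the project id and ordering key value are placeholders, and the usual debezium.sink.pubsub prefix is assumed:

debezium.sink.type=pubsub
debezium.sink.pubsub.project.id=my-gcp-project
# Use an externally provided field as the Pub/Sub ordering key
debezium.sink.pubsub.ordering.enabled=true
debezium.sink.pubsub.ordering.key=aggregateid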

    CloudEvents schema name customization

When using a schema registry, event schemas need to be registered with a name so that they can be looked up later by pipelines. The same applies when pairing CloudEvents-formatted messages with a schema registry, and in Debezium 2.6 you can explicitly control how the name is registered.

By default, the schema for a CloudEvent message is automatically generated by the converter. However, if the auto-generated schema names are not sufficient, you can adjust the configuration by specifying dataSchemaName, which can be set either to generate (the default behavior) or to header to pull the schema name directly from the specified event header field.

    Timestamp converter improvements

    Debezium released the new TimezoneConverter in Debezium 2.4, allowing users to target a specific time zone and to convert the outgoing payload time values to that targeted time zone. The original implementation was specifically restricted to allow conversion of values within the before or after parts of the payload; however, thanks to an improvement as a part of DBZ-7022, the converter can now be used to convert other time-based fields in the metadata, such as ts_ms in the source information block.

    This change helps to improve lag metric calculations in situations where the JVM running the connector is using a time zone that differs from the database and the calculation of the envelope ts_ms - source ts_ms results in a variance caused by the time zone. By using the TimezoneConverter to convert metadata fields, you can easily calculate the lag between those two fields without the time zone interfering.
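A hedged sketch of the corresponding SMT configuration; the time zone and the include rule targeting the source block's ts_ms field are illustrative, so consult the TimezoneConverter documentation for the exact rule syntax:

transforms=tzc
transforms.tzc.type=io.debezium.transforms.TimezoneConverter
transforms.tzc.converted.timezone=Europe/Prague
# Illustrative rule: also convert the ts_ms field within the source metadata block
transforms.tzc.include.list=source:customers:ts_ms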

    Signal table watermark metadata

    An incremental snapshot process requires a signal table to write open/close markers to coordinate the change boundaries with the data recorded in the transaction logs, unless you’re using MySQL’s read-only flavor. In some cases, users would like to be able to track the window time slot, knowing when the window was opened and closed.

    Starting with Debezium 2.6, the data column in the signal table will be populated with the time window details, allowing users to obtain when the window was opened and closed. The following shows the details of the data column for each of the two signal markers:

    Window Open Marker
    {"openWindowTimestamp": "<window-open-time>"}
    Window Close Marker
    {"openWindowTimestamp": "<window-open-time>", "closeWindowTimestamp": "<window-close-time>"}

    Cassandra configurable partition modes

When the Debezium Cassandra connector reads the commit logs, events are processed sequentially and added to a queue. If multiple queues exist, events are distributed between the queues based on the hash of the commit log filename. This could result in events being emitted in non-chronological order.

    With Debezium 2.6, the Cassandra connector’s hashing algorithm now uses the partition column names to resolve the queue index for insertion. This should provide a more stable insert order so that events are emitted in the correct order.

A new configuration option has been added to opt in to this new behavior. Debezium users can set the new configuration property event.order.guarantee.mode to partition_values to take advantage of this new mode. By default, the property retains the old behavior with a default of commitlog_file.

    Other fixes

    Outlook & What’s next?

    With Debezium 2.6 released, the team has already started work on Debezium 2.7, which will be released later this year in June. This upcoming release will feature a standalone MariaDB connector, user-friendly offset manipulation, read-only incremental snapshots for relational connectors, and possibly a sneak peek at the first PoC for Debezium Server’s UI.

This next quarter is equally ambitious with its deliverables, and we’d like to ask you to join the conversation. You can read all the details on the project’s 2024 road map, and get in touch with us on the mailing list or Zulip chat. We would love to hear your feedback on the road map and any suggestions you may have that are not yet included.

This upcoming quarter will mark the final release in the Debezium 2.x release stream, Debezium 2.7. With a new major release brewing, now is the time for code clean-up and deprecation removal. If you have not taken a moment to review features that may have been scheduled for removal, we ask that you do so and offer your feedback as soon as possible. We want the transition to Debezium 3 to be as close to a drop-in replacement as possible, but we cannot do that without your help.

    With spring in full swing, don’t forget to stop and enjoy the roses. Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.6.1.Final Released

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This is the first maintenance release for the 2.6 stream, and it focuses on addressing several critical stability issues in 2.6.0.Final, including classpath loading problems with Debezium Server, the MongoDB connector silently ceasing to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    New features and improvements

Debezium 2.6.1.Final also introduces several improvements and features; let’s take a look at each individually.

    Oracle RAW data type to STRING converter

    Debezium treats a RAW column type as a series of bytes, and therefore, change events that contain RAW columns use a schema type of BYTES. This is a reasonable default because Debezium does not know the application’s purpose for using a RAW column, and it is packaging the data from the column as-is. However, this may not be ideal for consumer applications that would prefer the data emitted as a STRING type rather than as BYTES.

    To help users, we’ve introduced a new RawToStringConverter, which automatically emits RAW columns as STRING based types rather than the default of BYTES (DBZ-7753).

    To configure the converter, simply add the following configuration:

converters=raw-to-string
    converters.raw-to-string.type=io.debezium.connector.oracle.converters.RawToStringConverter
    converters.raw-to-string.selector=.*.MY_TABLE.MY_RAW_COLUMN

The selector property allows the converter to specifically target one or multiple RAW columns across one or more tables by providing a comma-separated list of regular expressions.

    The selector property is optional, and if omitted, the converter applies to ALL RAW columns.

    Fixes and stability improvements

    Debezium Server snapshotting fixed

    If you attempted to perform or re-execute the snapshot phase with Debezium Server in 2.6.0.Final, the process would fail with an error about acquiring a lock. This was quickly identified as it severely affected the ability to use any Debezium Connector under Debezium Server based on 2.6.0.Final.

We have adjusted how we resolve the components needed for the new Snapshotter API so that when multiple connectors exist on the same class path, the resolution picks the right component. In addition, we’ve also improved the Debezium Server test suite to look specifically for these types of issues moving forward, to avoid releasing a build with similar problems.

    Debezium Server users should move to 2.6.1.Final and not use 2.6.0.Final.

    Other fixes

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    In total there were 14 issues resolved in Debezium 2.6.1.Final. The list of changes can also be found in our release notes.

    A big thank you to all the contributors from the community who worked diligently on this release: Andrey Pustovetov, Anisha Mohanty, Chris Cranford, Chris Recalis, Jakub Cechacek, Jiri Pechanec, Lourens Naudé, Mario Fiore Vitale, Robert Roldan, and Selman Genç!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.7.0.Alpha1 Released

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from updates to connectors such as MongoDB, MariaDB, MySQL, Oracle, and Vitess and to the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Core
    • It was identified that certain JDBC queries could indefinitely block in the case of certain communication failures. To combat this problem, a new configurable timeout option, query.timeout.ms is available to set the maximum time that a JDBC query can execute before being terminated (DBZ-7616).

    SQL Server
    • The SQL Server connector previously processed all transactions captured during a single database round trip. This behavior is configurable and is based on max.iterations.transactions, which defaults to processing all transactions (value of 0). This could lead to unexpected out of memory conditions if your database has a high volume of transactions.

To address these use cases, the default value for max.iterations.transactions has changed to 500, making the connector more resilient out of the box. If you want to return to the previous behavior, simply set this option to 0 in your connector configuration (DBZ-7750), as shown in the sketch below.
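A small sketch of the two properties in a connector configuration; the values shown are examples only:

# Abort JDBC queries that run longer than ten minutes (0 disables the timeout)
query.timeout.ms=600000
# SQL Server only: restore the pre-2.7 behavior of processing all transactions per iteration
max.iterations.transactions=0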

    New features and improvements

Debezium 2.7.0.Alpha1 also introduces many improvements and features; let’s take a look at each individually.

    Install Debezium Operator with Helm Chart

    To improve the deployment of the Debezium Operator, it can be installed with a helm chart at https://charts.debezium.io. This avoids the overly complicated deployment model of installing the operator into separate namespaces, minimizing the complexities for managing multiple Debezium Server deployments on Kubernetes.
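A quick, hedged sketch of what the installation could look like; the release name, namespace, and chart name are illustrative, so check the chart repository for the exact values it publishes:

helm repo add debezium https://charts.debezium.io
helm repo update
helm install debezium-operator debezium/debezium-operator --namespace debezium --create-namespace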

    Support predicate conditions for MongoDB incremental snapshots

    The incremental snapshot process is an instrumental part in various recovery situations to collect whole or part of the data set from a source table or collection. Relational connectors have long supported the idea of supplying an additional-conditions value on the incremental snapshot signal to restrict the data set, providing for targeted resynchronization of specific rows of data.

    We’re happy to announce that this is now possible with MongoDB (DBZ-7138). Unlike relational databases, the additional-conditions should be supplied in JSON format. It will be applied to the specified collection using the find operation to obtain the subset list of documents that are to be incrementally snapshotted.
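A hedged sketch of an execute-snapshot signal payload for MongoDB with such a predicate; the collection name and filter are placeholders, and the filter is an ordinary MongoDB find() document:

{
  "data-collections": ["inventory.products"],
  "type": "incremental",
  "additional-conditions": [
    {
      "data-collection": "inventory.products",
      "filter": "{ \"quantity\": { \"$lt\": 5 } }"
    }
  ]
}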

    New MariaDB standalone connector

    Debezium 2.5 introduced official support for MariaDB as part of the existing MySQL connector. The next step in that evolution is here, with a new standalone connector implementation for MariaDB (DBZ-7693).

There are a few things worth noting here:

    • MariaDB and MySQL both have a common shared dependency on a new abstract connector called debezium-connector-binlog, which provides the common framework for both binlog-based connectors.

• Each standalone connector is now tailored specifically to its target database, so MySQL users should use MySQL and MariaDB users should use MariaDB. As a result, the connection.adapter configuration option has been removed, and the jdbc.protocol configuration option is now only relevant to certain MySQL use cases and is not used by MariaDB.

    The documentation for this connector is still a work-in-progress and will be added in the future. For the moment, you can refer to the MySQL connector documentation for most things related to MariaDB.
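A hedged sketch of a minimal standalone MariaDB connector configuration; the hostnames, credentials, and topic names are placeholders, and the connector class name should be verified against the released artifact:

connector.class=io.debezium.connector.mariadb.MariaDbConnector
database.hostname=mariadb.example.com
database.port=3306
database.user=debezium
database.password=dbz
database.server.id=184054
topic.prefix=mariadb1
schema.history.internal.kafka.bootstrap.servers=kafka:9092
schema.history.internal.kafka.topic=schema-changes.mariadb1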

    ExtractNewDocumentState includes document id for MongoDB deletes

In prior releases of the MongoDB ExtractNewDocumentState single message transformation, a delete event did not provide the identifier as part of the payload. This reduced the usefulness of delete events, as consumers were supplied with insufficient data to act on them. This behavior has been improved, and the delete event now includes an _id attribute in the payload (DBZ-7695).

    Transaction metadata encoded ordering

In some pipelines, ordering is critical for consuming applications. Certain scenarios can impact this aspect of your data pipeline, such as when a Kafka re-partition occurs, and reconstructing the ordering after the fact is error-prone.

Now, when transaction metadata is enabled, these metadata events also encode their transaction order, so that if a Kafka re-partition or another scenario alters the ordering semantics, consumers can simply use the new encoded ordering field for deterministic ordering of transactions (DBZ-7698).
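Transaction metadata remains opt-in and is enabled per connector with a single property; a minimal sketch:

# Emit BEGIN/END transaction metadata events, which now also carry the encoded transaction order
provide.transaction.metadata=true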

    Blocking incremental snapshot improvements

    There are some use cases where incremental snapshot signals require escaping certain characters in the fully-qualified table name. This caused some problems with blocking snapshots because the process to resolve what tables to snapshot used a slightly different mechanism. In Debezium 2.7, we’ve unified this approach, and you can now use escaped table names with blocking snapshots where applicable (DBZ-7718).

    Cassandra performance improvement

The Cassandra connector also saw some changes in Debezium 2.7, specifically performance optimizations. The implementation of the KafkaRecordEmitter relied on a thread-synchronization block that reduced throughput, and it also performed some unnecessary flushing that further impacted performance. This code has been rewritten to improve throughput and reduce the unnecessary flush calls (DBZ-7722).

    New Oracle "RawToString" custom converter

    While Oracle recommends that users avoid using RAW-based columns, these columns are still widely used in standard Oracle tables for backward compatibility reasons. But there are also business use cases where it makes sense to continue to use RAW columns rather than other data types.

    Debezium 2.7 introduces a new custom converter specifically for Oracle called RawToStringConverter (DBZ-7753). This custom converter is designed to allow you to quickly convert the byte-array contents of the RAW column to a string-based field using a STRING schema type. This can be useful for situations where you use a RAW column to store character data that doesn’t require the collation overhead of VARCHAR2, but you still have the need for this field to be sent to consumers as string-based data.

    To get started with this custom converter, please see the documentation for more details.

    Improved NLS character-set support for Oracle

    When installing the Debezium 2.7 Oracle connector, you may notice a new dependency, orai18n.jar. This dependency is being automatically distributed to provide extended character-set support for certain dialects (DBZ-7761).

    Improved temporal support in Vitess

    Debezium relational connectors rely on a configuration option, time.precision.mode, to control how temporal values are added to change events. In some cases, you may want to use modes that align with Kafka types, using the connect mode. In other cases, you may prefer to avoid precision loss by using the default, adaptive_milliseconds mode.

    The Debezium for Vitess connector has traditionally not followed this model, and instead has emitted temporal values as string-based types. While this helps avoid the loss of precision problem when using the connect mode, this adds unnecessary overhead on consumers to parse and manipulate these values.

    In Debezium 2.7, Vitess aligns this behavior with other relational connectors, using the time.precision.mode to control how temporal values are sent (DBZ-7773). By default, it will use the adaptive_milliseconds mode, but you can customize this to use connect mode if you prefer. The emission of string-based temporal values has been removed.
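A small sketch of selecting the mode in the Vitess connector configuration, using the mode names described above:

# Default is adaptive_milliseconds; switch to connect to align with Kafka Connect's built-in time types
time.precision.mode=connect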

    Other changes

Altogether, 50 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • Builtin database name filter is incorrectly applied only to collections instead of databases in snapshot DBZ-7485

    • Upgrade Debezium Quarkus Outbox to Quarkus 3.9.2 DBZ-7663

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is was captured DBZ-7697

    • The test is failing because wrong topics are used DBZ-7715

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • Handle instability in JDBC connector system tests DBZ-7726

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Fix MongoDB unwrap SMT test DBZ-7731

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Align unwrap tests to respect AT LEAST ONCE delivery DBZ-7746

    • Exclude reload4j from Kafka connect dependencies in system testsuite DBZ-7748

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Use thread cap only for default value DBZ-7763

    • Evaluate cached thread pool as the default option for async embedded engine DBZ-7764

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    • Improve processing speed of async engine processors which use List#get() DBZ-7777

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • Document potential null values in the after field for lookup full update type DBZ-7789

    • old class reference in ibmi-connector services DBZ-7795

    • Documentation for Debezium Scripting mentions wrong property DBZ-7798

    • Fix invalid date/timestamp check & logging level DBZ-7811

    What’s next?

    Debezium 2.7 is just getting underway and we have a number of additional changes planned, including a MongoDB sink connector, expanding Oracle 23 support, a new SPI to aid in the memory-footprint of certain multi-tenant schema architectures and more. You can find more about what is planned for Debezium 2.7 on our road map.

The team is also in the final stages of defining our face-to-face agenda. If you have any suggestions or ideas that you would like for us to discuss or would like to see planned in 2.7 or a future release, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.7.0.Alpha1 Released

    As the temperature for summer continues to rise, I’m please to announce that Debezium has some really cool news, Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Core
    • It was identified that certain JDBC queries could indefinitely block in the case of certain communication failures. To combat this problem, a new configurable timeout option, query.timeout.ms is available to set the maximum time that a JDBC query can execute before being terminated (DBZ-7616).

    SQL Server
    • The SQL Server connector previously processed all transactions captured during a single database round trip. This behavior is configurable and is based on max.iterations.transactions, which defaults to processing all transactions (value of 0). This could lead to unexpected out of memory conditions if your database has a high volume of transactions.

      To address this for these use cases, the default value for max.iterations.transactions has changed to 500, to be more resilient for these deployment use cases out-of-the-box. If you want to return to the previous behavior, simply add this configuration option to your connector with a value of 0 (DBZ-7750).

    New features and improvements

    Debezium 2.7.0.Alpha1 also introduces many improvements and features, lets take a look at each individually.

    Install Debezium Operator with Helm Chart

    To improve the deployment of the Debezium Operator, it can be installed with a helm chart at https://charts.debezium.io. This avoids the overly complicated deployment model of installing the operator into separate namespaces, minimizing the complexities for managing multiple Debezium Server deployments on Kubernetes.

    Support predicate conditions for MongoDB incremental snapshots

    The incremental snapshot process is an instrumental part in various recovery situations to collect whole or part of the data set from a source table or collection. Relational connectors have long supported the idea of supplying an additional-conditions value on the incremental snapshot signal to restrict the data set, providing for targeted resynchronization of specific rows of data.

    We’re happy to announce that this is now possible with MongoDB (DBZ-7138). Unlike relational databases, the additional-conditions should be supplied in JSON format. It will be applied to the specified collection using the find operation to obtain the subset list of documents that are to be incrementally snapshotted.

    New MariaDB standalone connector

    Debezium 2.5 introduced official support for MariaDB as part of the existing MySQL connector. The next step in that evolution is here, with a new standalone connector implementation for MariaDB (DBZ-7693).

    There are few things worth noting here:

    • MariaDB and MySQL both have a common shared dependency on a new abstract connector called debezium-connector-binlog, which provides the common framework for both binlog-based connectors.

    • Each standalone connector now specifically is tailored only to its target database, so MySQL users should use MySQL and MariaDB users should use MariaDB. As a result, the connection.adapter configuration option has been removed, and the jdbc.protocol configuration option is now only specific to certain MySQL use cases and not used by MariaDB.

    The documentation for this connector is still a work-in-progress and will be added in the future. For the moment, you can refer to the MySQL connector documentation for most things related to MariaDB.

    ExtractNewDocumentState includes document id for MongoDB deletes

    In prior release of the MongoDB ExtractNewDocumentState single message transformation, a delete event did not provide the identifier as part of the payload. This reduced the meaningfulness of delete events as consumers were supplied with insufficient data to act on these events. This behavior has been improved, and the delete event now includes an _id attribute in the payload (DBZ-7695).

    Transaction metadata encoded ordering

    In some pipelines, ordering is critical for consuming applications. There are certain scenarios that can impact this aspect of your data pipeline, such as when Kafka re-partition occur. This leads to problems that can be error-prone trying to reconstruct the ordering after-the-fact.

    Now, when transaction metadata is enabled, the metadata events also encode their transaction order, so that if a Kafka re-partitioning or another scenario alters the ordering semantics, consumers can simply use the new encoded ordering field for deterministic ordering of transactions (DBZ-7698).
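
    Transaction metadata is enabled per connector with the existing option shown below; with this change, the emitted metadata events additionally carry the encoded transaction ordering described above:

    Example enabling transaction metadata
    provide.transaction.metadata=true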

    Blocking incremental snapshot improvements

    There are some use cases where incremental snapshot signals require escaping certain characters in the fully-qualified table name. This caused some problems with blocking snapshots because the process to resolve what tables to snapshot used a slightly different mechanism. In Debezium 2.7, we’ve unified this approach, and you can now use escaped table names with blocking snapshots where applicable (DBZ-7718).

    Cassandra performance improvement

    The Cassandra connector also saw some changes in Debezium 2.7, specifically performance optimizations. The implementation of the KafkaRecordEmitter relied on a thread-synchronization block that reduced throughput, and it also performed some unnecessary flushing that further impacted performance. This code has been rewritten to improve throughput and eliminate the unnecessary flush calls (DBZ-7722).

    New Oracle "RawToString" custom converter

    While Oracle recommends that users avoid using RAW-based columns, these columns are still widely used in standard Oracle tables for backward compatibility reasons. But there are also business use cases where it makes sense to continue to use RAW columns rather than other data types.

    Debezium 2.7 introduces a new custom converter specifically for Oracle called RawToStringConverter (DBZ-7753). This custom converter allows you to quickly convert the byte-array contents of a RAW column to a string-based field using a STRING schema type. This can be useful where you use a RAW column to store character data that doesn’t require the collation overhead of VARCHAR2, but you still need the field to be sent to consumers as string-based data.

    To get started with this custom converter, please see the documentation for more details.
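
    As a sketch of how such a converter is typically registered (the alias raw2string and the fully qualified class name below are assumptions; consult the converter documentation for the exact class):

    Example registering the RawToString converter (illustrative)
    converters=raw2string
    raw2string.type=io.debezium.connector.oracle.converters.RawToStringConverter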

    Improved NLS character-set support for Oracle

    When installing the Debezium 2.7 Oracle connector, you may notice a new dependency, orai18n.jar. This dependency is being automatically distributed to provide extended character-set support for certain dialects (DBZ-7761).

    Improved temporal support in Vitess

    Debezium relational connectors rely on a configuration option, time.precision.mode, to control how temporal values are added to change events. In some cases, you may want to use modes that align with Kafka types, using the connect mode. In other cases, you may prefer to avoid precision loss by using the default, adaptive_milliseconds mode.

    The Debezium connector for Vitess has traditionally not followed this model, instead emitting temporal values as string-based types. While this avoids the precision-loss problem of the connect mode, it adds unnecessary overhead for consumers, which must parse and manipulate these values.

    In Debezium 2.7, Vitess aligns this behavior with other relational connectors, using the time.precision.mode to control how temporal values are sent (DBZ-7773). By default, it will use the adaptive_milliseconds mode, but you can customize this to use connect mode if you prefer. The emission of string-based temporal values has been removed.
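
    For example, to opt into Kafka Connect temporal types instead of the default:

    Example selecting the connect temporal mode
    time.precision.mode=connect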

    Other changes

    Altogether, 50 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • Builtin database name filter is incorrectly applied only to collections instead of databases in snapshot DBZ-7485

    • Upgrade Debezium Quarkus Outbox to Quarkus 3.9.2 DBZ-7663

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • The test is failing because wrong topics are used DBZ-7715

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • Handle instability in JDBC connector system tests DBZ-7726

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Fix MongoDB unwrap SMT test DBZ-7731

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Align unwrap tests to respect AT LEAST ONCE delivery DBZ-7746

    • Exclude reload4j from Kafka connect dependencies in system testsuite DBZ-7748

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Use thread cap only for default value DBZ-7763

    • Evaluate cached thread pool as the default option for async embedded engine DBZ-7764

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    • Improve processing speed of async engine processors which use List#get() DBZ-7777

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • Document potential null values in the after field for lookup full update type DBZ-7789

    • old class reference in ibmi-connector services DBZ-7795

    • Documentation for Debezium Scripting mentions wrong property DBZ-7798

    • Fix invalid date/timestamp check & logging level DBZ-7811

    What’s next?

    Debezium 2.7 is just getting underway, and we have a number of additional changes planned, including a MongoDB sink connector, expanded Oracle 23 support, a new SPI to help reduce the memory footprint of certain multi-tenant schema architectures, and more. You can find more about what is planned for Debezium 2.7 on our road map.

    The team is also in the final stages of defining our face-to-face agenda. If you have any suggestions or ideas that you would like us to discuss, or would like to see planned in 2.7 or a future release, please feel free to get in touch with us on our mailing list or in our Zulip chat.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    Debezium 2.7.0.Alpha2 Released

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in depth…​

    New features and improvements

    Debezium 2.7.0.Alpha2 also introduces many improvements and features, so let’s take a look at each individually.

    Oracle ROW_ID included in change events

    While ROW_ID is not unique across all rows of a table for the table’s lifespan, it can be used in certain situations when the lifecycle of the table and rows are managed in a very strict way. At the community’s request, we’ve added a new row_id field to the Oracle connector’s change event source information block (DBZ-4332). This new field will be populated with the ROW_ID value under the following conditions:

    • Only populated from streaming events for inserts, updates, and deletes.

    • Snapshot events will not contain a row_id value.

    • Only provided by the LogMiner and XStream streaming adapters; OpenLogReplicator is not supported.

    Any event that does not match these criteria will not include a row_id field, as the field is marked as optional.

    PostgreSQL Arrays with the JDBC sink

    The JDBC sink connector supports mapping source columns to Kafka ARRAY-based payload field types. With Debezium 2.7, you can now serialize ARRAY-based fields to a target PostgreSQL database with no change in configuration. The new support should be completely transparent (DBZ-7752).

    Oracle flush table with custom schema names

    In prior versions of Debezium, the Oracle connector was strictly designed to create the LogMiner flush table in the default tablespace of the connector user account. This wasn’t always useful in situations where the user’s default tablespace may not be the ideal destination and the DBA would prefer that table to exist in a separate tablespace.

    Previously, users would need to modify the user account or use a new user with the correct tablespace to have the table created in the right tablespace location. With Debezium 2.7, this is no longer required, and you can safely include the name of the target schema/tablespace in the configuration (DBZ-7819).

    Example using a custom schema name
    log.mining.flush.table.name=THE_OTHER_SCHEMA.LOG_MINING_FLUSH_TABLE

    The schema name is optional; if it is not supplied, the connector continues with the legacy behavior of creating the flush table, and checking for its existence, in the user’s default tablespace.

    NATS authentication with JWT/seed

    The Debezium Server NATS streaming sink adapter was improved to support JWT/seed-based authentication (DBZ-7829). To get started with JWT/seed-based authentication, supply the following values in the configuration:

    JWT authentication example
    debezium.sink.nats-jetstream.auth.jwt=<your_jwt_token>
    Seed authentication example
    debezium.sink.nats-jetstream.auth.seed=<your_nkey_seed>

    For this and more, please see the NATS documentation for details about JWT and NKey seed based authentication.

    Oracle query filter with large numbers of tables

    The Debezium Oracle connector can support thousands of tables in a single connector deployment with ease; however, you may have found you wanted to customize the query filter using the IN mode. This mode is used in situations where you may have a high volume of changes for other tables and you want to filter that dataset out at the database level before the changes are passed to Debezium for processing.

    In earlier versions, setting log.mining.query.filter.mode to a value of in with a table include list containing more than 1000 elements generated a SQL error. Oracle does not permit more than 1000 elements within an in-clause; Debezium 2.7 addresses this limitation by using a disjunction across multiple in-clause buckets of 1000 items each (DBZ-7847).
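
    Conceptually, the generated predicate is simply split into multiple IN lists joined by OR, along the lines of the following simplified sketch (the column expression and table names are illustrative, not the exact query Debezium builds):

    ... WHERE TABLE_NAME IN ('TABLE_0001', ..., 'TABLE_1000')
           OR TABLE_NAME IN ('TABLE_1001', ..., 'TABLE_1500') ...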

    Other changes

    Altogether, 27 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • Log exception details early in case MySQL keep-alive causes deadlock on shutdown DBZ-7570

    • Extend mongodb system tests with ssl option DBZ-7605

    • > io.debezium.text.ParsingException : SQL Contains Partition DBZ-7805

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • DEBEZIUM_VERSION is wrongly set to 2.6.0.Alpha1 DBZ-7827

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

    • Debezium JDBC Sink not handle order correctly DBZ-7830

    • Bump Outbox Extension to Quarkus 3.10.0 DBZ-7842

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    What’s next?

    We have our team face-to-face next week, and it’s going to be absolutely fantastic, since it’s the first time we get to meet in person since the Covid pandemic. We’re going to use this time to discuss all the community feedback we’ve received throughout Debezium 2, reflect on what worked, and put together action plans for what didn’t.

    The main focus for our meeting is to develop an action plan for Debezium 3.0 and beyond, assigning tasks and priorities across the team so that, as we focus on Debezium 3 next quarter, we can make this next major release an easy transition for the community while also delivering a new, refreshing, feature-rich major version. We will be updating the roadmap and deliverables when we’re back, so be sure to stay tuned to our road map.

    In terms of Debezium 2.7, we’re halfway through the quarter and about to turn our focus to the second half, where we will address any bugs and regressions and polish new features. If you have the chance to test-drive the pre-releases, we strongly encourage you to do so and file bug reports.

    Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    Debezium 2.6.2.Final Released

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues in the 2.6.1.Final release, and includes support for Oracle database query filtering with more than one thousand tables, a fix for a race condition in PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

    New features and improvements

    Debezium 2.6.2.Final introduces one new feature, so let’s take a look at it.

    Oracle database query filtering with 1000+ tables

    The Debezium Oracle connector allows configuring the table include and exclude lists to be applied as part of the database query for efficiency reasons using the log.mining.query.filter.mode. When setting this configuration property to a value of in, the query is constructed using a SQL in-clause. In Oracle, an in-clause is restricted to one thousand elements, and if your connector configuration exceeded this limit, the query would fail to execute.

    Debezium 2.6 addresses this by creating sub-buckets of one thousand elements and constructing the query using multiple in-clauses separated by a disjunction when the table include and exclude lists are too large. This improves the user experience, allowing a single connector deployment to capture more than one thousand tables using the query filter mode, without resorting to regular expressions (DBZ-7847).

    Fixes and stability improvements

    Debezium 2.6.2.Final introduces several new bug fixes and stability improvements, so let’s take a look at a few of them.

    PostgreSQL offset flush race condition

    The Debezium PostgreSQL connector requires that not only do we publish offset metadata to Kafka to handle connector restarts, but the connector must also flush these details to the database’s underlying replication slot to prevent unbounded WAL growth.

    As an example, let’s suppose we have two changes, at LSN 1 and LSN 2. These changes are written to two different partitions, A and B, respectively. As this happens asynchronously, it’s possible that the write to partition B succeeds while the write to partition A fails.

    If Debezium receives acknowledgement for partition B before A, the connector will synchronize the WAL confirmed LSN as 2. Should the connector fail before acknowledging partition A or get restarted due to a rebalance, upon connector restart the change for LSN 1 is lost.

    This problem has been fixed in Debezium 2.6 onward, and the replication slot’s confirmed LSN will correctly match the state in the topics (DBZ-7816).

    Avro compatibility

    In Debezium 2.5, we introduced several new timestamp fields, ts_us and ts_ns, which represent the existing millisecond-based time values in microseconds and nanoseconds, respectively. Unfortunately, these fields were not introduced in a compatible way for users who use Avro (DBZ-7880).

    We have adjusted the change event schema so that the new fields introduced are optional, making the change event schema compatible with old versions so that users can upgrade to Debezium 2.6 or later without issues using Avro.

    Other fixes

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • old class reference in ibmi-connector services DBZ-7795

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • Add c3p0 timeout configuration example to JDBC sink DBZ-7822

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

    • Debezium JDBC Sink not handle order correctly DBZ-7830

    • in the Cassandra documentation, there is a typo which should have been disable not Dusable. DBZ-7851

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    • Default value of error retries not interpreted correctly DBZ-7870

    • Too much logs after Debezium update DBZ-7871

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • ParsingException (MySQL 8): create trigger if exists DBZ-7881

    • Debezium can’t handle columns with # in its name DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    In total there were 20 issues resolved in Debezium 2.6.2.Final. The list of changes can also be found in our release notes.

    A big thank you to all the contributors from the community who worked diligently on this release: Anil Dasari, Chris Cranford, Duc Le Tu, Ilyas Ahsan, Jiri Pechanec, Jochen Schalanda, Mario Fiore Vitale, Michal Augustýn, Pradeep Nain, Robert Roldan, Robin Moffatt, Roman Kudryashov, and Vadzim Ramanenka!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    Debezium 2.7.0.Beta1 Released

      enabled: true
      configFrom:
        key1: value1
        key2: value2

    In the custom resource, the jmxExporter.enabled field toggles the exporter on or off. Additionally, the metrics configuration can be supplied as key/value pairs in the jmxExporter.configFrom section.

    Optional delay between snapshot & streaming

    Debezium 2.7 ships with a new global configuration option, streaming.delay.ms. This new option causes the connector to perform a delay before it starts the streaming phase (DBZ-7902).

    For some deployment use cases, you may want to guarantee that at least one offset flush interval has elapsed before the streaming phase begins. In such cases, users should ensure that the streaming.delay.ms and offset.flush.interval.ms properties are aligned.

    By default, Debezium does not delay and immediately transitions to the streaming phase, remaining consistent with the behavior of prior versions.
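
    For example, to delay streaming by at least one offset flush interval, the two properties can be aligned as follows (the values are only illustrative):

    offset.flush.interval.ms=60000
    streaming.delay.ms=60000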

    Other changes

    Altogether, 36 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • Debezium 1.9.2 cannot capture field that is date type of postgres DBZ-5182

    • Ensure vgtid remains local to shards streamed by task DBZ-6721

    • Decompose provide.transaction.metadata into components DBZ-6722

    • Handle Enum as String or Int DBZ-7792

    • Rewrite batch statement not supported for jdbc debezium sink DBZ-7845

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Reduce enum array allocation DBZ-7859

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Bump Java in Debezium Server images DBZ-7861

    • Default value of error retries not interpreted correctly DBZ-7870

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • Improve offset and history storage configuration DBZ-7884

    • Oracle Debezium Connector cannot startup due to failing incremental snapshot DBZ-7886

    • Allow customizing ObjectMapper in JsonSerde DBZ-7887

    • Multiple completed reading from a capture instance notifications DBZ-7889

    • Debezium can’t handle columns with # in its name DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    • Debezium server unable to shutdown on pubsub error DBZ-7904

    • Handle gtid without range only single position DBZ-7905

    • Enhancing the threads utility class for broader use DBZ-7906

    • Oracle connector cannot parse SUBPARTITION when altering table DBZ-7908

    • Make column exclude use keyspace not shard DBZ-7910

    • The explanation in the documentation is insufficient - metric DBZ-7912

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    Debezium 2.7.0.Final Released

      enabled: true
      configFrom:
        key1: value1
        key2: value2

    In the custom resource, the jmxExporter.enabled field toggles the exporter on or off. Additionally, the metrics configuration can be supplied as key/value pairs in the jmxExporter.configFrom section.

    Stopping Debezium Server when scaled to zero

    Debezium Server is now stopped when the replicas are scaled to zero by using the annotation debezium.io/stop=true (DBZ-7953).

    Other changes

    What’s next & Outlook

    With the release of Debezium 2.7, the team is now switching gears and heavily focusing on the next major milestone, Debezium 3.0. This next major release includes a variety of changes, including but not limited to:

    • Java 17 as baseline

    • Kafka 3.1+ as baseline

    • New off-heap Oracle cache implementations based on EhCache & Hazelcast

    • Exactly-once semantics support for other relational connectors

    • Sink connector for MongoDB

    • and more

    This list represents a quick glance at what’s at the top of our queue and is subject to change. If you’d like to get involved in the conversation about Debezium 3.0 and the next evolution of the project, contact us on the mailing list or Zulip chat. As always, please review our road map for more details.

    As summer is in full swing and holiday plans are starting for many, be safe. Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

        }).build();

    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(engine);

    If you want to use AsyncEmbeddedEngine, for now you have to use the create(KeyValueHeaderChangeEventFormat<K, V, H> format, String builderFactory) method with io.debezium.embedded.async.ConvertingAsyncEngineBuilderFactory as the builder factory. Other shortcut builder methods still point to EmbeddedEngine.
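
    Putting that together, a minimal sketch of building and running the asynchronous engine might look like the following (the JSON formats, the trivial Consumer, and the empty Properties are illustrative placeholders rather than a complete configuration):

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;
    import io.debezium.engine.format.KeyValueHeaderChangeEventFormat;

    public class AsyncEngineExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            // connector, offset storage and other engine configuration goes here

            DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(
                    // JSON serialization for key, value and headers
                    KeyValueHeaderChangeEventFormat.of(Json.class, Json.class, Json.class),
                    // selects the asynchronous engine implementation
                    "io.debezium.embedded.async.ConvertingAsyncEngineBuilderFactory")
                .using(props)
                .notifying(record -> System.out.println(record))
                .build();

            // the engine is a Runnable; run it on a dedicated executor
            ExecutorService executor = Executors.newSingleThreadExecutor();
            executor.execute(engine);

            // call engine.close() when you want to terminate the engine (see below)
        }
    }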

    Once you are done and want to terminate the engine, you call engine.close() as in the case of EmbeddedEngine. The main difference here is that once the AsyncEmbeddedEngine is closed, it cannot be started again and has to be re-created. The reason for this is to prevent possible resource leaks when the engine is being stopped and started from different threads in parallel (you can find more details in the design document and DBZ-2534).

    Configuration options

    Compared to EmbeddedEngine, AsyncEmbeddedEngine provides only a few additional configuration options, mostly related to thread management:

    • record.processing.threads - The size of the thread pool for record processing.

    • record.processing.order - Determines how the records should be produced, either ORDERED or UNORDERED.

    • record.processing.with.serial.consumer - Specifies whether the default ChangeConsumer should be created from the provided Consumer.

    • record.processing.shutdown.timeout.ms - Maximum time in milliseconds to wait for processing submitted records after a task shutdown is called.

    • task.management.timeout.ms - Time limit engine waits for a task’s lifecycle management operations (starting and stopping) to complete.

    record.processing.threads is quite clear: it’s the size of the shared thread pool used for processing records. You can use the AVAILABLE_CORES placeholder to use all available cores on the given machine.

    record.processing.order - as described above, records can be processed either in the same order in which the changes happened in the database, or in a completely asynchronous manner, which results in out-of-order delivery of the records to the sink. Which method is used is determined by this option. Please note that this option only has an effect when the user handler is provided as a Consumer function. As explained in the previous section, a ChangeConsumer expects the whole batch of records, so the Debezium engine cannot ensure parallel processing of individual records, and setting this option to UNORDERED makes no sense in that case.

    record.processing.with.serial.consumer determines whether the default ChangeConsumer should be created from the user-provided Consumer function. This is essentially a backward-compatibility option with EmbeddedEngine. EmbeddedEngine always uses a ChangeConsumer, and if the user provides a Consumer function instead, EmbeddedEngine creates a default ChangeConsumer from it. When you enable this option, AsyncEmbeddedEngine does the same and creates the same ChangeConsumer as EmbeddedEngine, so you get exactly the same behavior as with EmbeddedEngine.

    record.processing.shutdown.timeout.ms specifies how long the engine should wait for the processing of already submitted records. Once shutdown is called, no further records are submitted for processing, but you may want to wait for records that are already being processed. As record processing should generally be fast, this can be a small value (from tens of milliseconds to a few seconds).

    task.management.timeout.ms determines the timeout for a task to start or stop. If the timeout is exceeded, the thread running the task is forcefully killed. When this timeout is exceeded during startup and a task is killed, all other tasks are killed as well: either all of the tasks start or none of them do. Compared to record.processing.shutdown.timeout.ms, starting tasks can be quite time consuming (creating connections to the database, etc.), so this timeout should be substantially higher than the timeout for record processing (possibly on the order of minutes).
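
    Putting these options together, an engine configuration fragment might look like the following (the values are only illustrative; pick values appropriate for your deployment):

    record.processing.threads=AVAILABLE_CORES
    record.processing.order=ORDERED
    record.processing.with.serial.consumer=false
    record.processing.shutdown.timeout.ms=1000
    task.management.timeout.ms=300000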

    Debezium server usage

    Starting with Debezium 2.6.0.Alpha2, Debezium Server was switched to use AsyncEmbeddedEngine. Thus, if you use Debezium Server 2.6.0.Alpha2 or later, you are already using the asynchronous engine. As Debezium Server currently uses only a ChangeConsumer for processing CDC records, all of the constraints related to the use of ChangeConsumer mentioned above (the impossibility of processing records out of order) apply to Debezium Server as well. This can change in the future, but at the moment we don’t see any demand for it.

    Deprecation of EmbeddedEngine

    As of Debezium 2.7.0.Final, EmbeddedEngine was deprecated (DBZ-7976). We will keep it for about the next 6 months. During this time we are going to migrate the rest of our test suite to the asynchronous engine (DBZ-7977) and then remove EmbeddedEngine in Debezium 3.1.0.Final (DBZ-8029). If you use the DebeziumEngine API, the migration should be very straightforward. The only thing you need to do, if you use the converting wrapper, is to switch from ConvertingEngineBuilderFactory to ConvertingAsyncEngineBuilderFactory, as described in the previous chapter. However, we strongly recommend switching to the asynchronous engine sooner rather than later, and letting us know if you spot any issues, so that we have sufficient time to fix them before the final removal of EmbeddedEngine.

    Future steps and outlook

    Besides the aforementioned removal of EmbeddedEngine, are we done with the changes, or do we plan any further ones? Of course we plan to continue with the improvements! So what can you look forward to?

    With Debezium 3.0 we will switch to Java 21 for building Debezium, and in future releases Java 21 will become the Debezium baseline. With this, we would like to switch to Java virtual threads. This may bring an additional speedup and eventually also simplify the code a little bit. We will evaluate this option based on the results of our internal performance tests.
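
    Purely as an illustration of the Java 21 API in question (this is not how the engine is wired today), virtual threads can be obtained from a standard executor:

        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        // Java 21: every submitted task runs on its own lightweight virtual thread
        try (ExecutorService executor = Executors.newVirtualThreadPerTaskExecutor()) {
            executor.submit(() -> System.out.println("processing on " + Thread.currentThread()));
        }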

    Speaking of performance tests, one may ask why no performance comparison is included in this blog post. We did, of course, run performance tests; we have some JMH benchmarks (PRs with improvements are welcome!) and also ran some end-to-end performance tests. You can find some JMH results e.g. under this pull request, which also compares the results with EmbeddedEngine. On the other hand, we are fully aware of the complexity and trickiness of performance testing, and we believe that producing solid results still requires more work. It would deserve its own blog post anyway. After all, even with very solid performance results, the reality of your deployment may still be different, so what really matters are your own performance tests, done on your hardware, with your production network setup, etc. If you run such tests, we would be more than happy to hear the results.

    As for other things, we may add more implementations of RecordProcessors, e.g. one suggested by Jeremy Ford in the discussion under the asynchronous engine DDD.

    In the longer term, we would like to add support for gRPC and Protocol Buffers. This should give us a two-fold advantage: the Debezium engine would be able to coordinate the execution of multiple tasks across different machines, and it would also be able to receive CDC records from them in a unified format. The ability to run multiple tasks (for connectors which allow it) on separate machines/containers is crucial especially in environments like Kubernetes, where you ideally want to run each task in a separate container. Defining a Protocol Buffers format would allow Debezium to work with all kinds of connectors, written even in different languages and running on a large variety of devices, even on the edge, allowing the Debezium engine to become the heart of any CDC solution.

    These are the plans you can look forward to in the short and long term. What we are looking for in the near future is your feedback on the new asynchronous engine. If you have any, please share it via the usual channels, either the Debezium Zulip chat or the mailing list.

    Vojtěch Juránek

    Vojta is a software engineer at Red Hat. He lives in the Czech Republic.

     


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas how we can improve Debezium, please let us know or log an issue.


    diff --git a/blog/2024/07/11/debezium-3.0-alpha1-released/index.html b/blog/2024/07/11/debezium-3.0-alpha1-released/index.html

        "connection.string": "...",
        "topics": "topic1,topic2",
        "sink.database": "targetdb",
    }

    The connection.string and sink.database configuration properties are mandatory. These define the details for connecting to the target MongoDB database and the name of the target database where the changes will be written.

    Additionally, the topics configuration property is required by Kafka Connect; it specifies a comma-separated list of regular expressions for the topics that the sink connector will observe.
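
    Put together, registering the sink via the Kafka Connect REST API could look roughly like the following sketch; the connector class and connection string are placeholders, so please check the connector documentation for the exact values:

        {
          "name": "mongodb-sink-example",
          "config": {
            "connector.class": "<MongoDB sink connector class>",
            "topics": "topic1,topic2",
            "connection.string": "mongodb://localhost:27017",
            "sink.database": "targetdb"
          }
        }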

    Documentation for this connector is still a work-in-progress, so if you have any questions or problems, don’t hesitate to reach out to the team on our Zulip chat or Mailing List.

    Other changes

    • Debezium postgres jdbc sink not handling infinity values DBZ-7920

    • JdbcSinkTask doesn’t clear offsets on stop DBZ-7946

    • ibmi as400 connector config isn’t prefixed with "database." DBZ-7955

    • Duplicate downstream annotation comments incorrectly refer to Db2 connector DBZ-7968

    • Issue with Hybrid mode and DDL change DBZ-7991

    • Extends process of finding Bundle path DBZ-7992

    • Incorrect offset/history property mapping generated DBZ-8007

    • Debezium Server Operator on minikube with java.lang.NullPointerException': java.lang.NullPointerException DBZ-8019

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    • Support FLOAT32 type in debezium-connector-spanner DBZ-8043

    In total, 20 issues were resolved in Debezium 3.0.0.Alpha1. The list of changes can also be found in our release notes.

    A big thank you to all the contributors from the community who worked diligently on this release: Andreas Martens, Aravind, Chris Cranford, Jakub Cechacek, Jiri Pechanec, Mario Fiore Vitale, Ondrej Babec, René Kerner, Ricardo Rosales, Robert Roldan, Ryan van Huuksloot, and Shuran Zhang!

    What’s next & Outlook

    With Debezium 2.7.0.Final released, we’re actively looking at the reports from the community and addressing any critical bugs or regressions that were not identified throughout development. If you believe you’ve spotted a bug or regression, please log a Jira issue so that these can be tracked and addressed. Of course, contributions for these are much appreciated, if you have the bandwidth.

    While this pre-release is smaller than we generally prefer, its focus was primarily on the inclusion of the MongoDB sink connector and the move to Java 17/21. There are still many features in the pipeline that will be coming in a future build of Debezium, including but not limited to:

    • Kafka 3.1+ as baseline

    • New off-heap Oracle cache implementations based on EhCache & Hazelcast

    • Exactly-once semantics support for other relational connectors

    • and more

    This list represents a quick glance at what’s at the top of our queue, and is subject to change. If you’d like to get involved in the conversation about Debezium 3.0 and the next evolution of the project, contact us on the mailing list or Zulip chat. As always, please review our road map for more details.

    For now, that’s it from our side, enjoy the summer and be safe! Until next time…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



    diff --git a/blog/2024/08/02/debezium-3.0-alpha2-released/index.html b/blog/2024/08/02/debezium-3.0-alpha2-released/index.html

    Debezium 3.0.0.Alpha2 Released

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    Breaking changes

    With any new major release of software, there are often several breaking changes. The Debezium 3.0.0.Alpha2 release is no exception, so let’s discuss the major changes you should be aware of.

    Built on Kafka 3.8

    This release moves to Kafka 3.8 as our baseline for testing and building Debezium. Kafka 3.8 changed a number of internal APIs that needed to be adapted for Debezium’s use (DBZ-8105).

    For most users, this change has no impact; however, if you are extending Debezium, it’s important to be aware of these changes.

    New features and improvements

    The upgrade to Debezium 3.0.0.Alpha2 introduces a number of new improvements to several components:

    JDBC Sink

    Relocation of JDBC sink repository

    The JDBC sink has been relocated from the debezium-connector-jdbc repository to the main debezium repository (DBZ-8008). With the introduction of the MongoDB sink connector in Debezium 3.0.0.Alpha1, this allows the team to easily share common contracts across our sink connectors.

    Moving forward, to raise pull requests for the JDBC sink, please use the main Debezium repository, as the old repository will be archived and placed into read-only mode after Debezium 2.7.1.Final this week.

    Debezium Server

    Custom converter types support

    In prior releases of Debezium Server, there was a fixed set of converters that could be used for headers, keys, and values: Json, JsonByteArray, CloudEvents, Avro, Protobuf, Binary, and SimpleString. While these cover the vast majority of use cases, it’s not uncommon for someone to have a unique requirement, specific to their environment, that falls outside these options.

    In this release, a new ClientProvided converter option has been added, which allows for extending the header, key, and value converters with a custom, user-supplied implementation (DBZ-8040).

    Google Spanner

    Support for 32-bit floats

    The Google Spanner database introduced support for a 32-bit float data type. The Debezium Google Spanner connector has been adjusted to support this new data type (DBZ-8043).

    Vitess

    Empty shard support

    In Vitess, it is possible for a keyspace to have shards that have no tablets. The Debezium connector for Vitess has improved its handling of this case and now processes such a keyspace gracefully, without failing (DBZ-8053).

    Other changes

    • Embedded Infinispan tests fail to start with Java 23 DBZ-7840

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • StackOverflow exception on incremental snapshot DBZ-8011

    • Write a blog post about async engine DBZ-8013

    • Test offset/history store configurations DBZ-8015

    • JDBC primary.key.fields cannot be empty when i set insert.mode to upsert and primary.key.mode record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • Add Status ObservedGeneration to Operator DBZ-8025

    • Release process sets incorrect images for k8s for the next development version DBZ-8041

    • Use recreate as (default) rollout strategy for deployments DBZ-8047

    • "Unexpected input: ." when snapshot incremental empty Database DBZ-8050

    • Debezium Operator Using RollingUpdate Strategy DBZ-8051

    • Debezium Operator Using RollingUpdate Strategy DBZ-8052

    • Refactor LogMining implementation to allow alternative cache implementations DBZ-8054

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Blocking snapshot can fail due to CommunicationsException DBZ-8058

    • FakeDNS not working with JDK version > 18 DBZ-8059

    • Debezium Operator with a provided Service Account doesn’t spin up deployment DBZ-8061

    • Upgrade postgres server version to 15 DBZ-8062

    • Standard Webhooks signatures for HTTP sink DBZ-8063

    • ParsingException (MySQL/MariaDB): rename table syntax DBZ-8066

    • Disable DebeziumResourceNoTopicCreationIT - no longer compatible with Java 21 DBZ-8067

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • Vitess-connector should provide a topic naming strategy that supports separate connectors per-table DBZ-8069

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Speed-up PostgresShutdownIT DBZ-8075

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Standard Webhooks auth secret config value is not marked as PASSWORD_PATTERN DBZ-8078

    • Vitess transaction Epoch should not reset to zero when tx ID is missing DBZ-8087

    • Add MariaDB to debezium/connect image DBZ-8088

    • After changing the column datatype from int to float the Debezium fails to round it and i get a null value for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

    • Update third-party LICENSE with LGPL forMariaDB Connector/J DBZ-8099

    • NotificationIT tests seemingly seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

    • Transformations are not closed in emebdded engine DBZ-8106

    • Rabbitmq native stream Failed DBZ-8108

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    In total, 43 issues were resolved in Debezium 3.0.0.Alpha2. The list of changes can also be found in our release notes.

    What’s next & Outlook

    We are about half-way through the Debezium 3.0 release cycle, with many more new features still in the works. As we continue to work through these features, we will also keep updating Debezium 2.7 with bug fixes and various improvements. You can expect the first maintenance release, Debezium 2.7.1.Final, later this week.

    Please check out our road map for more details, and get involved in the conversation. You can contact us on the mailing list or Zulip chat if you have any questions or suggestions about anything Debezium.

    Until next time, stay cool and safe…​

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       



    diff --git a/blog/2024/08/05/Debezium-ui-update/index.html b/blog/2024/08/05/Debezium-ui-update/index.html

    Status of Debezium UI

    Hello everyone, Jakub here. You may have noticed that there hasn’t been much happening around Debezium UI lately. This impression, however, is only partially true. We owe you an explanation in this regard, so please bear with me.

    Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right direction. It was thus decided to freeze the development of the current web UI project. There were two main reasons for this decision:

    1. The team doesn’t have the bandwidth (nor the desire) to develop a general purpose Kafka connector UI that would be on par with some of the existing offerings (e.g. Lenses). So our UI would only ever be able to work with Debezium connectors specifically.

    2. Debezium Server is gaining popularity and we would like to offer a user friendly way of managing this flavour of Debezium as well. Unfortunately, the deployment model of Debezium Server is very different from deploying Debezium as a connector.

    What’s Next?

    As the previous paragraph mentioned the current UI, it’s probably not a surprise that we did not give up on the idea. Part of the decision was to explore the possibility of creating a new UI which, while still focused exclusively on Debezium, would gradually support multiple deployment models. The idea is to start with Debezium Server deployed on Kubernetes, followed by bare metal deployments of Debezium Server. At some point in the future we would like to cycle back to Kafka Connect and also support the deployment and operation of Debezium as Kafka Connect connectors.

    Architecture

    Designing the New UI

    One of the goals we defined for ourselves when designing the new platform was to simplify the understanding of what Debezium does. For this reason, we are going to abstract away from the underlying deployment model. Instead of focusing on servers and connectors, we want to allow our users to think about Debezium in terms of data. Thus the new generation of the UI should operate with data pipelines — you define the source, the destination, and any data transformations.

    These can then be composed into a deployable pipeline. Depending on the target environment (Kafka Connect, Kubernetes, bare metal), an appropriate deployment will be used.

    Initial Wireframe Design

    When a pipeline is deployed to Kubernetes or a bare metal machine

    • The Source is the configuration of the Debezium Source connector

    • Transformations are the usual SMTs

    • The Destination corresponds to the configuration of Debezium Server’s Sink.

    When a pipeline is deployed to Kafka Connect

    • The Source is the configuration of the Debezium Source connector

    • Transformations are the usual SMTs

    • The destination is always a Kafka topic

    In the future, we could also allow our JDBC and MongoDB sink connectors to be used as destinations, allowing for truly end-to-end defined pipelines when Kafka Connect is targeted.

    Where Do We Stand Right Now

    We are at the beginning of this journey. Nevertheless, there is a rough PoC prototype ready for you to examine. As already briefly mentioned, the initial plan is to target only Kubernetes as the supported deployment environment, more specifically Debezium Server on Kubernetes. This choice allows us to take advantage of our Debezium Operator.

    Screenshot: Destination types list

    We intend to introduce configuration and feature support gradually. Thus the configuration is deliberately opinionated and simplistic — currently all pipelines can only use ephemeral storage. In this early version, you must also set all configuration properties as simple key-value pairs. We intend to change this in the future, and as a next step, you can look forward to auto-completion of property names, and ultimately, fully specialized forms will be rendered.

    Screenshot: Create new source

    You can review the current status of the codebase on GitHub:

    We need your help!

    At the end of this short report, we would like to ask you to try this early PoC and let us know your opinion. We would especially appreciate it if you could let us know your answers to the following questions:

    • What features are a must for you?

    • What features would you consider nice to have?

    • What is your visual impression?

    Thank you and expect to hear from us soon with more updates to come!

    Čecháček Jakub

    Jakub is a Principal Software Engineer at Red Hat. He lives in the Czech Republic.

       



    diff --git a/blog/2024/08/08/debezium-2-7-1-final-released/index.html b/blog/2024/08/08/debezium-2-7-1-final-released/index.html

    Debezium 2.7.1.Final Released

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    Fixes and stability improvements

    Debezium 2.7.1.Final introduces several new bug fixes and stability improvements; let’s take a look at a few of them.

    MariaDB is now included in Debezium container images

    In Debezium 2.7.0.Final, we mistakenly missed including the new Debezium connector for MariaDB in the container images, which required users to install the MariaDB connector manually. This has been fixed, and Debezium 2.7.1.Final’s container images now include all connectors as they did previously (DBZ-8008).

    Support for MariaDB RENAME TABLE

    In Debezium 2.7, with the separation of MariaDB and MySQL into their own connectors, each also uses its own grammar to parse SQL statements. A regression was identified where the new MariaDB grammar did not support the SQL syntax for RENAME TABLE (DBZ-8066). This has been fixed, and the syntax is fully supported for both MySQL and MariaDB.
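
    For reference, the statement in question is the standard MySQL/MariaDB rename syntax, for example:

        -- a single rename; both databases also accept several "old TO new" pairs in one statement
        RENAME TABLE customers TO customers_archive;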

    JDBC sink partition rebalance fixed

    In prior versions of the Debezium JDBC sink, users may have noticed several strange messages in their connector logs that may resemble the following output:

    Ignoring invalid task provided offset topicXYZ-1 OffsetAndMetadata{offset=4966, leaderEpoch=null, metadata=''} — partition not assigned, assignment=[topicXYZ-1] (org.apache.kafka.connect.runtime.WorkerSinkTask:434)

    Thanks to a user report (DBZ-7946), it was identified that the JDBC sink connector was not properly handling the rebalance of partitions across multiple tasks. This regression has been fixed in 2.7.1.Final: when a partition rebalance occurs, the partition is now closed by the task that will no longer manage it; ergo, the above messages will no longer be written to the connector logs.

    Ad-hoc snapshot improvements

    In Debezium 2.7, users may have noticed that if they picked the insert_delete watermark strategy for handling signal table entries, the connector would eventually fail with a StackOverflowError (DBZ-8011). With Debezium 2.7.1.Final or later, this regression has been fixed and users can once again use the insert_delete watermark strategy.

    In addition, there was also a report that sending an ad-hoc snapshot signal containing a regular expression that matches no database tables would cause the ad-hoc snapshot process to fail with an "Unexpected result ." error (DBZ-8050). This has also been fixed, so the ad-hoc snapshot now gracefully skips the signal when no matching tables are found.
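
    For context, an ad-hoc incremental snapshot is typically requested by inserting a row into the configured signaling table; a minimal sketch, where the signal table and collection names are illustrative:

        -- the data-collections entries may be regular expressions; if nothing matches,
        -- the signal is now skipped gracefully instead of failing the snapshot
        INSERT INTO debezium_signal (id, type, data)
        VALUES ('adhoc-snapshot-1', 'execute-snapshot',
                '{"data-collections": ["inventory.orders"], "type": "incremental"}');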

    Finally, it was also identified that ad-hoc blocking snapshots could lead to a CommunicationsException due to how the connection was being managed by the initial and blocking snapshot code paths (DBZ-8058). This has been fixed in Debezium 2.7.1.Final by having the blocking snapshot validate and re-open the connection if required.

    Embedded engine now closes transformations

    The new asynchronous embedded engine was recently released by the Debezium team and continues to improve across releases. As part of these continuous improvements, it was identified that single message transforms did not have their close() method called. This could easily lead to resource leaks, depending on what the transformation does.

    This release introduces a fix to automatically close transformations to avoid this problem (DBZ-8106).
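
    To illustrate why this matters, a single message transform may hold resources that are released only in close(). A simplified sketch of such a transformation, where the class and its resource are purely illustrative and not an actual Debezium SMT:

        import java.util.Map;

        import org.apache.kafka.common.config.ConfigDef;
        import org.apache.kafka.connect.connector.ConnectRecord;
        import org.apache.kafka.connect.transforms.Transformation;

        public class ResourceHoldingTransform<R extends ConnectRecord<R>> implements Transformation<R> {

            private AutoCloseable resource;   // e.g. a connection or cache opened in configure()

            @Override
            public void configure(Map<String, ?> props) {
                resource = () -> { };         // stands in for acquiring a real resource
            }

            @Override
            public R apply(R record) {
                return record;                // no-op, the example is about the lifecycle only
            }

            @Override
            public ConfigDef config() {
                return new ConfigDef();
            }

            @Override
            public void close() {             // without this call the resource would leak
                try {
                    resource.close();
                }
                catch (Exception e) {
                    // log and ignore during shutdown
                }
            }
        }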

    Oracle connector could produce a NullPointerException

    A change was added to the Debezium Oracle connector (DBZ-8055) to address several race conditions with Oracle LogMiner and change capture. Unfortunately, that change introduced a corner case in which the connector would recalculate the upper mining range, and in such cases this led to a NullPointerException.

    A new fix (DBZ-8119) is included in Debezium 2.7.1.Final to correct the problem. Oracle connector users are encouraged to upgrade to 2.7.1.Final, skipping 2.7.0.Final, to avoid this regression.

    Vitess epoch calculations fixed

    It was identified that there were some corner cases where the transaction epoch value was reset when a transaction ID was unavailable (DBZ-8087). This was unexpected behavior, as the transaction epoch should increase monotonically, and resetting it to zero broke that expectation. A fix has been added to avoid the reset and restore the expected transaction epoch behavior.

    Other fixes

    • Document new MariaDB connector DBZ-7786

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • Issue with Hybrid mode and DDL change DBZ-7991

    • Incorrect offset/history property mapping generated DBZ-8007

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • JDBC primary.key.fields cannot be empty when i set insert.mode to upsert and primary.key.mode record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Add disclaimer that PostProcessors and CustomConverters are Debezium source connectors only DBZ-8031

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Conditionalize reference to the MySQL default value in description of schema.history.internal.store.only.captured.databases.ddl DBZ-8081

    • After changing the column datatype from int to float the Debezium fails to round it and i get a null value for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

• Update third-party LICENSE with LGPL for MariaDB Connector/J DBZ-8099

    • NotificationIT tests seemingly seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

    • Converters documentation uses incorrect examples DBZ-8104

• Remove reference to `additional condition` signal parameter from ad hoc snapshots doc DBZ-8107

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Docs: connect-log4j.properties instead log4j.properties DBZ-8117

    • TimescaleDbDatabaseTest.shouldTransformCompressedChunks is failing DBZ-8123

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    In total there were 37 issues resolved in Debezium 2.7.1.Final. The list of changes can also be found in our release notes.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 3.0.0.Beta1 Released

… "quantity": 2, "totalPrice": 39.98 } }

    So you can safely implement the Outbox pattern without the physical outbox table! (DBZ-8103).
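The payload fragment above is the tail of a larger outbox example. As an illustration only, the sketch below shows one way to emit such a payload without a physical outbox table, using PostgreSQL’s pg_logical_emit_message() function over JDBC; whether this is the exact route used by the example above is our assumption, and the connection details, prefix, and payload fields are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class EmitOutboxMessage {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/orders";   // placeholder connection details
        String payload = "{\"orderId\": 1001, \"quantity\": 2, \"totalPrice\": 39.98}"; // illustrative payload
        try (Connection conn = DriverManager.getConnection(url, "user", "password");
             PreparedStatement stmt = conn.prepareStatement(
                     "SELECT pg_logical_emit_message(true, ?::text, ?::text)")) { // transactional message
            stmt.setString(1, "outbox");    // prefix used to route the message
            stmt.setString(2, payload);
            stmt.executeQuery();
        }
    }
}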

    Other changes

Altogether, 48 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • MySQL has deprecated mysql_native_password usage DBZ-7049

    • Upgrade to Apicurio 2.5.8 or higher DBZ-7357

    • Incremental snapshots don’t work with CloudEvent converter DBZ-7601

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Move Debezium Conductor repository under Debezium Organisation DBZ-7973

    • Log additional details about abandoned transactions DBZ-8044

    • ConverterBuilder doesn’t pass Headers to be manipulated DBZ-8082

    • Bump Debezium Server to Quarkus 3.8.5 DBZ-8095

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Support DECIMAL(p) Floating Point DBZ-8114

    • Recalculating mining range upper bounds causes getScnFromTimestamp to fail DBZ-8119

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] DBZ-8125

    • Use SQLSTATE to handle exceptions for replication slot creation command timeout DBZ-8127

    • ibmi Connector does not take custom properties into account anymore DBZ-8129

    • Unpredicatable ordering of table rows during insertion causing foreign key error DBZ-8130

    • schema_only crashes ibmi Connector DBZ-8131

    • Support larger database.server.id values DBZ-8134

    • Implement in process signal channel DBZ-8135

    • Re-add check to test for if assembly profile is active DBZ-8138

    • Validate log position method missing gtid info from SourceInfo DBZ-8140

    • Add LogMiner start mining session retry attempt counter to logs DBZ-8143

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile DBZ-8144

    • SchemaOnlyRecoverySnapshotter not registered as an SPI service implementation DBZ-8147

    • Reduce logging verbosity of XStream DML event data DBZ-8148

    • When stopping the Oracle rac node the Debezium server throws an expections - ORA-12514: Cannot connect to database and retries DBZ-8149

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput DBZ-8150

    • JDBC connector validation fails when using record_value with no primary.key.fields DBZ-8151

    • Vitess Connector Epoch should support parallelism & shard changes DBZ-8154

    • Add an option for publication.autocreate.mode to create a publication with no tables DBZ-8156

    • Taking RAC node offline and back online can lead to thread inconsistency DBZ-8162

    • Upgrade Outbox Extension to Quarkus 3.14.0 DBZ-8164

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 2.7.2.Final Released

I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability issues and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and a fix for a regression involving the JsonConverter and the TruncateColumn handler.

    Let’s dive into these changes…​

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Debezium Server Kafka Sink

The Debezium Server Kafka sink adapter could wait indefinitely when a Kafka broker became unavailable. A new configurable timeout has been added to the sink adapter to force the adapter to fail when the timeout is reached. The new option, debezium.sink.kafka.wait.message.delivery.timeout.ms, has a default value of 30 seconds. Please adjust this accordingly if the default is insufficient for your needs (DBZ-7575).

    SQL Server

Normal JMX metrics are registered with a taskId attribute because SQL Server supports spawning a unique task per database mapping. Unfortunately, the JMX signal channel did not honor this, which led to issues starting the JMX signal channel for each task. This has been fixed, and the JMX signal channel’s MBean name now includes the taskId to uniquely identify a signal channel for each database task when using a single connector to stream changes from multiple SQL Server databases (DBZ-8137).

    New features and improvements

Debezium 2.7.2.Final also introduces a couple of improvements and features; let’s take a look at each individually.

    Informix DECIMAL(p) data type support

    The DECIMAL data type has seen a few improvements with Debezium for Informix. The data type can be defined with or without precision. When defined without precision, the connector interprets the column’s type as DECIMAL(16). However, users can also define columns as DECIMAL(p,s), where p is a precision up to 32 and s is the variable scale.

    In order to represent the upper-bounds of such decimal numbers using PRECISE decimal handling mode, the Informix connector had to be adjusted to take advantage of the Debezium VariableScaleDecimal semantic type, which enables Debezium to retain full precision of such floating point numbers when serialized in a Kafka message (DBZ-8114).

    For more information on how DECIMAL(p,s) data types are serialized, please see the Informix documentation.
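On the consumer side, a VariableScaleDecimal value arrives as a Kafka Connect Struct carrying the scale and the unscaled value bytes. The sketch below shows one way a Java consumer might rebuild a BigDecimal from such a struct; the field names scale and value follow the documented layout of the semantic type, while the surrounding class and method are just an illustration.

import java.math.BigDecimal;
import java.math.BigInteger;

import org.apache.kafka.connect.data.Struct;

public class VariableScaleDecimals {

    // Rebuilds a BigDecimal from an io.debezium.data.VariableScaleDecimal struct:
    // "scale" is an int32 and "value" holds the unscaled value as big-endian bytes.
    public static BigDecimal toBigDecimal(Struct variableScaleDecimal) {
        int scale = variableScaleDecimal.getInt32("scale");
        byte[] unscaled = variableScaleDecimal.getBytes("value");
        return new BigDecimal(new BigInteger(unscaled), scale);
    }
}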

    Fixes and stability improvements

    Debezium 2.7.2.Final introduces several bug fixes and stability improvements.

    Column truncation now respected by JsonConverter

    In earlier versions of Debezium, the column.truncate.* functionality returned a sliced ByteBuffer based on the truncation configuration. While this worked when using Avro, the truncation was not honored if your connector configuration used the JsonConverter as it operated on the entire underlying array rather than the given slice.

    With Debezium 2.7.2.Final, this has been fixed and the column truncation logic now explicitly creates a ByteBuffer based on a new array. This change allows the JsonConverter to respect the truncated column value during the serialization to Kafka (DBZ-8189).
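The underlying Java behavior is easy to reproduce: a sliced ByteBuffer still reports the full backing array, which is what a converter reading array() would serialize, whereas wrapping a copied array only exposes the truncated bytes. The snippet below is a standalone illustration of that difference, not Debezium code.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class TruncationExample {
    public static void main(String[] args) {
        byte[] original = "0123456789".getBytes(StandardCharsets.UTF_8);

        // Slicing keeps a reference to the full 10-byte backing array.
        ByteBuffer sliced = ByteBuffer.wrap(original, 0, 4).slice();
        System.out.println(sliced.array().length);                  // prints 10

        // Copying into a new array exposes only the truncated bytes.
        ByteBuffer truncated = ByteBuffer.wrap(Arrays.copyOf(original, 4));
        System.out.println(truncated.array().length);               // prints 4
    }
}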

    Snapshot mode schema_only_recovery has returned

    It was also reported that the schema_only_recovery snapshot mode was not functioning in Debezium 2.7. We have identified the issue and the schema-only recovery snapshot mode has returned as a viable choice for Debezium 2.7 once again.

    A gentle reminder that schema_only_recovery and schema_only were deprecated in Debezium 2.7 and have been removed in Debezium 3.0. Please be sure to use the recovery or no_data snapshot modes respectively moving forward when upgrading from Debezium 2.7 to 3.0.

    MariaDB missing from Debezium Server

Some of you may have noticed that, with the addition of the new official MariaDB connector in Debezium 2.7, the connector was absent from the Debezium Server distribution. You’ll be happy to know that the Debezium 2.7.2.Final builds now contain the MariaDB connector in the distribution, so you will no longer need to add it manually (DBZ-8186).

    Oracle VECSYS tablespace marked as built-in

If you’ve attempted to use Debezium with any of the latest Oracle 23ai images, you will have noticed that a new tablespace called VECSYS has emerged. This tablespace includes a variety of system tables for storing metadata about the new Oracle 23 vector database and search features.

    We have found that the DBMS_METADATA.GET_DDL function used to capture the table’s DDL structure is not compatible with several database objects in the VECSYS tablespace and will lead to connector issues if you don’t explicitly set schema.history.internal.store.only.captured.tables.ddl to true. To address this problem, the tablespace is marked as built-in, meaning it is no longer eligible for capture (DBZ-8198).

    Other fixes

    • Incremental snapshots don’t work with CloudEvent converter (DBZ-7601)

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist (DBZ-7797)

    • Postgres connector - null value processing for "money" type column. (DBZ-8027)

    • Using snapshot.include.collection.list with Oracle raises NullPointerException (DBZ-8032)

    • Log additional details about abandoned transactions (DBZ-8044)

    • Performance degradation when reconstructing (log.mining.stragtegy hybrid mode) (DBZ-8071)

    • Documentation for signals provides incorrect data-collection format for some connectors (DBZ-8090)

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] (DBZ-8125)

    • ConvertingFailureIT#shouldFailConversionTimeTypeWithConnectModeWhenFailMode fails randomly (DBZ-8128)

    • Unpredicatable ordering of table rows during insertion causing foreign key error (DBZ-8130)

    • schema_only crashes ibmi Connector (DBZ-8131)

• Support larger database.server.id values (DBZ-8134)

    • Add LogMiner start mining session retry attempt counter to logs (DBZ-8143)

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile (DBZ-8144)

    • Reduce logging verbosity of XStream DML event data (DBZ-8148)

    • When stopping the Oracle rac node the Debezium server throws an expectations - ORA-12514: Cannot connect to database and retries (DBZ-8149)

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput (DBZ-8150)

    • JDBC connector validation fails when using record_value with no primary.key.fields (DBZ-8151)

    • Taking RAC node offline and back online can lead to thread inconsistency (DBZ-8162)

    • Postgres JSONB Fields are not supported with Reselect Post Processor (DBZ-8168)

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine (DBZ-8187)

    • XStream may fail to attach on retry if previous attempt failed (DBZ-8188)

    • Reduce log verbosity of OpenLogReplicator SCN confirmation (DBZ-8201)

    In total there were 31 issues resolved in Debezium 2.7.2.Final. The list of changes can also be found in our release notes.

    A big thank you to all the contributors from the community who worked diligently on this release: Ashish Binu, Chris Cranford, Jiri Pechanec, Jochen Schalanda, Lars M. Johansson, Mario Fiore Vitale, Minh Son Nguyen, Mohamed El Shaer, moyq5, Robert Roldan, Sergei Kazakov, Thomas Thornton, and Vojtech Juranek!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Debezium 3.0.0.CR1 Released

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    Breaking changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    PostgreSQL

Vector data type names introduced for PostgreSQL were too database specific. In order to provide more generic support for vector data types, the names were adjusted to cover both PostgreSQL and MySQL (DBZ-8183).

    Oracle

    Several deprecated configuration properties have been removed:

    • log.mining.transaction.retention.hours replaced by log.mining.transaction.retention.ms

    • log.mining.archive.destination.name replaced by archive.destination.name

    • log.mining.archive.log.hours replaced by archive.log.hours

    Please be sure to update your Oracle connector configuration when using the deprecated configuration options to retain old behavior (DBZ-8181).

The default log.mining.strategy value has changed and is now online_catalog. As the vast majority of users typically use this strategy, and it generally performs better than redo_log_catalog, we felt this change made sense in Debezium 3. If your deployments were previously relying on the default redo_log_catalog strategy, you will need to explicitly add log.mining.strategy to the connector configuration and specify the value redo_log_catalog when upgrading (DBZ-3656).
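A minimal sketch of what that looks like when the connector is configured programmatically (for example with the embedded engine); with Kafka Connect, the same key/value pair simply goes into the connector’s JSON configuration. The other entries shown are placeholders.

import java.util.Properties;

public class OracleConnectorConfigExample {
    public static Properties connectorProperties() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.oracle.OracleConnector");
        props.setProperty("topic.prefix", "server1");                 // placeholder
        // Keep the pre-3.0 behavior by opting back into the old default explicitly.
        props.setProperty("log.mining.strategy", "redo_log_catalog");
        return props;
    }
}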

    SQL Server

The JMX signaling and notifications for SQL Server did not work correctly when a connector was configured with multiple databases spawning multiple tasks. To resolve this issue, it was necessary to change the signaling and notification MBean names to make sure they are unique per task (DBZ-8137).

    New features and improvements

Debezium 3.0.0.CR1 also introduces several improvements and features; let’s take a look at each individually.

    MySQL 9

    Oracle unveiled the first innovation release of MySQL 9.0 on July 1st, 2024. We are pleased to announce that we’ve tested and verified that MySQL 9.0 works and is supported starting with Debezium 3.0 (DBZ-8030). If you experience any issues or problems, please be sure to open an issue.

    MySQL Vector data types

    One of the newest features being added to relational databases is the introduction of vector data types. In addition to support for MySQL 9.0, Debezium 3 also introduces support for the new VECTOR(n) data type, which supports a list of floating-point values that can be expressed as a binary or list-formatted string. More information is available in the MySQL documentation about the vector data type (DBZ-8157).

In addition, the MySQL grammar has been updated to reflect support for the new MySQL 9.0 vector functions (DBZ-8210). More information about these functions is also available in the MySQL documentation.
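For a feel of the data type, here is a hedged sketch of creating and populating a table with a VECTOR column over JDBC. The table and connection details are placeholders, and STRING_TO_VECTOR is, to the best of our understanding, the MySQL 9.0 function that converts a list-formatted string into a vector value; consult the MySQL documentation linked above for the authoritative syntax.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MySqlVectorExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:mysql://localhost:3306/inventory";   // placeholder connection details
        try (Connection conn = DriverManager.getConnection(url, "user", "password");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE product_embeddings (id INT PRIMARY KEY, embedding VECTOR(3))");
            // List-formatted string converted to the binary vector representation.
            stmt.execute("INSERT INTO product_embeddings VALUES (1, STRING_TO_VECTOR('[0.12, 0.34, 0.56]'))");
        }
    }
}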

    MariaDB 11.4.3

Debezium 3 also introduces support for the most recent non-rolling release of MariaDB, 11.4.3 (DBZ-8226). We are also closely monitoring the MariaDB 11.6 release cycle and plan to introduce vector data type support when MariaDB 11.6 becomes stable.

    Oracle offline RAC node flush improvements

    In recent improvements to the Oracle RAC node flush strategy, it was determined that a three-second delay was being forced when an Oracle RAC node was taken offline by the database administrator. Since an Oracle RAC node cannot perform any writes to the redo logs while offline, this three-second delay introduced an unnecessary amount of latency while the node remained offline.

In Debezium 3, the three-second delay is only imposed when a connection to an Oracle RAC node is active but the flush SQL operation was unsuccessful. This means that when database administrators take RAC nodes offline for maintenance, no latency overhead is imposed by the connector (DBZ-8177).

    Vitess inherit shard epoch

A new Vitess connector configuration property has been added to control whether the epochs of a new shard, after a re-shard operation, inherit the epochs of its parent shard. This new configuration property, vitess.inherit.epoch, is disabled (false) by default (DBZ-8163).

    Other changes

Altogether, 45 issues were fixed in this release. Here is a list of some additional noteworthy changes:

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist DBZ-7797

    • MySQL 8.4 incompatibility due to removed SQL commands DBZ-7838

    • Postgres connector - null value processing for "money" type column. DBZ-8027

    • Using snapshot.include.collection.list with Oracle raises NullPointerException DBZ-8032

    • Performance degradation when reconstructing (log.mining.stragtegy hybrid mode) DBZ-8071

    • Documentation for signals provides incorrect data-collection format for some connectors DBZ-8090

    • The source data type exceeds the debezium data type and cannot deserialize the object DBZ-8142

    • Refactor engine signal support DBZ-8160

    • Incorrect use of generic types in tests DBZ-8166

    • Latest Informix JDBC Driver DBZ-8167

    • Postgres JSONB Fields are not supported with Reselect Post Processor DBZ-8168

    • upgrade Adobe s3mock to version 3.10.0 DBZ-8169

    • Include Jackson libraries to JDBC connector Docker image distribution DBZ-8175

    • Ehcache fails to start, throwing "Invaild XML Configuration" DBZ-8178

    • Enable snapshot.database.errors.max.retriesEnable during Oracle tests DBZ-8184

    • Change event for a logical decoding message doesn’t contain transaction field DBZ-8185

    • Add MariaDB connector server distribution DBZ-8186

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine DBZ-8187

    • XStream may fail to attach on retry if previous attempt failed DBZ-8188

    • Truncate byte buffer should return a new array DBZ-8189

    • Update Vitess example to Debezium 2.7/Vitess 19 DBZ-8196

    • OracleConnectorIT test shouldGracefullySkipObjectBasedTables can timeout prematurely DBZ-8197

    • Exclude Oracle 23 VECSYS tablespace from capture DBZ-8198

    • AbstractProcessorTest uses an incorrect database name when run against Oracle 23 Free edition DBZ-8199

    • Reduce log verbosity of OpenLogReplicator SCN confirmation DBZ-8201

    • Support for older MongoDb versions DBZ-8202

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • System testsuite fails with route name being too long DBZ-8213

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Using ehcache in Kafka connect throws an XMLConfiguration parse exception DBZ-8219

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    • OcpJdbcSinkConnectorIT fails DBZ-8228

    • Container image does not install correct apicurio deps DBZ-8230

    • Add information about helm chart installation to operator readme DBZ-8233

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/2024/09/18/quay-io-reminder/index.html b/blog/2024/09/18/quay-io-reminder/index.html index 76e19623fc..50a1cc7a76 100644 --- a/blog/2024/09/18/quay-io-reminder/index.html +++ b/blog/2024/09/18/quay-io-reminder/index.html @@ -1 +1 @@ - Moving container images to quay.io

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

    In April 2023, we announced that we intended to sunset the publishing of Debezium container images to docker.io. As we are now more than a year since that announcement, we believe sufficient time has passed to end dual-publishing.

    With the release of Debezium 3.0.0.Final, we will no longer publish container image updates to docker.io. Older images for Debezium 2.x and 1.x will remain on docker.io; however, all future Debezium 2.7.x and 3.x or later images will only be available on quay.io.

    The following chart shows the new locations for all images being sunset:

    Old Repository | New Pull command
    debezium/base | docker pull quay.io/debezium/base
    debezium/connect | docker pull quay.io/debezium/connect
    debezium/connect-base | docker pull quay.io/debezium/connect-base
    debezium/kafka | docker pull quay.io/debezium/kafka
    debezium/example-mariadb | docker pull quay.io/debezium/example-mariadb
    debezium/example-mongodb | docker pull quay.io/debezium/example-mongodb
    debezium/example-mysql | docker pull quay.io/debezium/example-mysql
    debezium/example-mysql-master | docker pull quay.io/debezium/example-mysql-master
    debezium/example-mysql-replica | docker pull quay.io/debezium/example-mysql-replica
    debezium/example-postgres | docker pull quay.io/debezium/example-postgres
    debezium/mongo-initiator | docker pull quay.io/debezium/mongo-initiator
    debezium/operator | docker pull quay.io/debezium/operator
    debezium/postgres | docker pull quay.io/debezium/postgres
    debezium/server | docker pull quay.io/debezium/server
    debezium/tooling | docker pull quay.io/debezium/tooling
    debezium/website-builder | docker pull quay.io/debezium/website-builder
    debezium/zookeeper | docker pull quay.io/debezium/zookeeper

    If you experience any issues, please reach out to us on our mailing list or Zulip chat.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Moving container images to quay.io

    \ No newline at end of file diff --git a/blog/2024/09/23/debezium-2-7-3-final-released/index.html b/blog/2024/09/23/debezium-2-7-3-final-released/index.html index 024d58c3ad..4203cb2ce2 100644 --- a/blog/2024/09/23/debezium-2-7-3-final-released/index.html +++ b/blog/2024/09/23/debezium-2-7-3-final-released/index.html @@ -1 +1 @@ - Debezium 2.7.3.Final Released

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    New features and improvements

    Debezium 2.7.3.Final introduces a couple of improvements and features; let’s take a look at each individually.

    MariaDB 11.4.3

    This release also introduces support for the most recent non-rolling release of MariaDB, 11.4.3 (DBZ-8226). We are also closely monitoring the MariaDB 11.6 release cycle and plan to introduce vector data type support when MariaDB 11.6 becomes stable.

    Oracle offline RAC node flush improvements

    In recent improvements to the Oracle RAC node flush strategy, it was determined that a three-second delay was being forced when an Oracle RAC node was taken offline by the database administrator. Since an Oracle RAC node cannot perform any writes to the redo logs while offline, this three-second delay introduced an unnecessary amount of latency while the node remained offline.

    With this change, the three-second delay is imposed only when a connection to an Oracle RAC node is active but the flush SQL operation is unsuccessful. This means that when database administrators take RAC nodes offline for maintenance, no latency overhead will be imposed by the connector (DBZ-8177).

    Other fixes

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    • Support BLOB with EMPTY_BLOB() as default DBZ-8248

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    In total there were 17 issues resolved in Debezium 2.7.3.Final. The list of changes can also be found in our release notes.

    A big thank you to all the contributors from the community who worked diligently on this release: Andrei Leibovski, Anisha Mohanty, Chris Cranford, Jakub Cechacek, Jiri Pechanec, Oren Elias, Robert Roldan, Vojtech Juranek, and martin!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 2.7.3.Final Released

    \ No newline at end of file diff --git a/blog/2024/09/25/debezium-3-0-cr2-released/index.html b/blog/2024/09/25/debezium-3-0-cr2-released/index.html index 233c1966f4..44611e99db 100644 --- a/blog/2024/09/25/debezium-3-0-cr2-released/index.html +++ b/blog/2024/09/25/debezium-3-0-cr2-released/index.html @@ -1 +1 @@ - Debezium 3.0.0.CR2 Released

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    New features and improvements

    Debezium 3.0.0.CR2 introduces several improvements and features; let’s take a look at each individually.

    PostgreSQL isolation level support

    A longstanding enhancement for snapshot isolation support for PostgreSQL is now here! A new connector configuration property, snapshot.isolation.mode, allows the connector to control the consistency used while executing the initial and ad-hoc blocking snapshot steps. There are four isolation levels: serializable (the default), repeatable_read, read_committed, and read_uncommitted. You can find details about these isolation levels and how they work with PostgreSQL in the documentation (DBZ-1252).
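
    For illustration, here is a minimal sketch of how the new property might be set in a PostgreSQL connector registration; the connector name, connection settings, and the choice of repeatable_read are hypothetical placeholders rather than recommendations:

    {
      "name": "inventory-postgres-connector",
      "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "database.hostname": "postgres",
        "database.port": "5432",
        "database.user": "debezium",
        "database.password": "dbz",
        "database.dbname": "inventory",
        "topic.prefix": "inventory",
        "snapshot.mode": "initial",
        "snapshot.isolation.mode": "repeatable_read"
      }
    }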

    JDBC retry flushes on specific failures

    The JDBC sink uses a set of buffers to improve write throughput to the target database. In some use cases, the flush operation for these buffers may fail with specific exceptions because other applications hold locks on a specific row or table. To improve the user experience, two new configuration properties have been added:

    flush.failure.max.retries

    Defines the number of retries when a flush failure occurs.

    flush.failure.retries.wait.ms

    Defines the number of milliseconds to wait between retries.

    The retry feature is enabled by default, retrying up to a maximum of 5 attempts with a 1-second delay between retries. To disable retries, set flush.failure.max.retries to 0 (DBZ-7291).
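
    As a rough sketch, a JDBC sink connector configuration could tune the new properties as shown below; the connector name, topics, connection details, and the specific retry values are illustrative assumptions only:

    {
      "name": "jdbc-sink-orders",
      "config": {
        "connector.class": "io.debezium.connector.jdbc.JdbcSinkConnector",
        "topics": "inventory.inventory.orders",
        "connection.url": "jdbc:postgresql://target-db:5432/warehouse",
        "connection.username": "app",
        "connection.password": "app-secret",
        "insert.mode": "upsert",
        "primary.key.mode": "record_key",
        "flush.failure.max.retries": "10",
        "flush.failure.retries.wait.ms": "2000"
      }
    }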

    Oracle EXTENDED max string size support

    Oracle’s extended strings feature raises the traditional 4000-byte limit on character data to 32K. This is done by applying a database upgrade that sets the database parameter max_string_size to EXTENDED. The same SQL syntax used for character data of 4000 bytes or less can then be used for character data up to 32K, without forcing you to fall back to CLOB-based operations.

    With Debezium 3, you can now use the Oracle connector with databases that use extended strings and capture the changes directly from the transaction logs (DBZ-8039). Because extended strings are effectively CLOB operations at the database level, mining such column types requires setting lob.enabled to true.
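
    For anyone who wants to try it, an Oracle connector configuration against an extended-strings database might look roughly like the following fragment; all hostnames, credentials, and table names are placeholders, and lob.enabled is the only setting this feature specifically requires:

    {
      "connector.class": "io.debezium.connector.oracle.OracleConnector",
      "database.hostname": "oracle",
      "database.port": "1521",
      "database.user": "c##dbzuser",
      "database.password": "dbz",
      "database.dbname": "ORCLCDB",
      "database.pdb.name": "ORCLPDB1",
      "topic.prefix": "server1",
      "table.include.list": "DEBEZIUM.DOCUMENTS",
      "lob.enabled": "true"
    }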

    As this new feature is experimental, we’d love to hear any feedback from the community!

    Oracle CLOB/BLOB default value support

    In some cases, Oracle users may define tables with a required CLOB or BLOB column, using the EMPTY_BLOB() or EMPTY_CLOB() function as the default when no value is supplied. In previous builds, these special functions were not evaluated by Debezium, and such columns would have been emitted as optional rather than not optional.

    Starting with Debezium 3, when an EMPTY_BLOB() or EMPTY_CLOB() default value is specified, the field will be emitted as not optional. Additionally, the field will contain the appropriate default value, an empty byte array or an empty string respectively (DBZ-8248).
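
    To make that concrete, with the JSON converter and schemas enabled, the schema entry for such a CLOB column in the emitted change event would then look roughly like the hypothetical sketch below, where the column name NOTES is made up:

    {
      "type": "string",
      "optional": false,
      "default": "",
      "field": "NOTES"
    }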

    Other changes

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    • JDBC sink test suite should use the debezium/connect:nightly image for e2e tests DBZ-8245

    • Bump Infinispan to 15.0.8.Final DBZ-8246

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    • Performance Regression in Debezium Server Kafka after DBZ-7575 fix DBZ-8251

    • Add a note to the docs about JDBC batch retry configs DBZ-8252

    • Fix conditionalization in shared MariaDB/MySQL file DBZ-8254

    • Error Prone library included in MySQL connector DBZ-8258

    • Debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-8259

    • Add Oracle FUTC license DBZ-8260

    • Remove Oracle libs from product assembly package DBZ-8261

    • debezium-connector-binlog does not need MariaDB dependency DBZ-8263

    • Provide subset package for Debezium Server DBZ-8264

    Altogether, 44 issues were fixed in this release; the list above highlights only some of the more noteworthy changes.

    A huge thank you to all contributors from the community who worked on this release: Andrei Leibovski, Anisha Mohanty, Chris Cranford, Cody Holmes, Inki Hwang, Jiri Pechanec, Mario Fiore Vitale, Robert Roldan, Vojtěch Juránek, and martin!

    What’s next & Outlook?

    First and foremost, assuming there are no critical issues, you can expect the final release of Debezium 3 sometime next week. As we move into the final months of 2024, it’s important to discuss what lies ahead for the rest of this year and for 2025.

    For the remainder of the 2024 calendar year, the team will continue to deliver maintenance and bugfix releases for Debezium 3. These will continue at our normal cadence of approximately every 2-3 weeks, depending on the issues reported and fixed. One change this quarter, however, is that there won’t be a minor release in December as in years past. This decision allows the team to focus on a number of low-hanging-fruit activities that aren’t directly tied to development tasks.

    In addition, as we get closer to the end of the year, we’ll be putting out our vision of the 2025 roadmap and what that includes. Now is a perfect time to think about what you’d like to see on that roadmap and get involved. You can always reach us on our mailing list or in our Zulip chat!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 3.0.0.CR2 Released

    \ No newline at end of file diff --git a/blog/2024/10/02/debezium-3-0-final-released/index.html b/blog/2024/10/02/debezium-3-0-final-released/index.html index 6ee0655369..af992625c8 100644 --- a/blog/2024/10/02/debezium-3-0-final-released/index.html +++ b/blog/2024/10/02/debezium-3-0-final-released/index.html @@ -36,4 +36,4 @@ "quantity": 2, "totalPrice": 39.98 } -}

    So you can safely implement the Outbox pattern without the physical outbox table! (DBZ-8103).

    PostgreSQL isolation level support

    A longstanding enhancement for snapshot isolation support for PostgreSQL is now here! A new connector configuration property, snapshot.isolation.mode, allows the connector to control the consistency used while executing the initial and ad-hoc blocking snapshot steps. There are four isolation levels: serializable (the default), repeatable_read, read_committed, and read_uncommitted. You can find details about these isolation levels and how they work with PostgreSQL in the documentation (DBZ-1252).

    Reselect post processor improvements

    The ReselectPostProcessor is a useful tool for populating change events that contain TOAST columns (PostgreSQL’s oversized-attribute storage technique). By default, when a TOAST column is found and is not mutated by the SQL operation, Debezium populates these fields with placeholders indicating that the value wasn’t provided but also wasn’t changed. A host of data types use this storage mechanism, including int/bigint arrays. With Debezium 3, these int/bigint array data types can be reselected by the post processor so that these fields are always populated, even when they’re not changed by the SQL operation (DBZ-8212).
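
    As a refresher, wiring the post processor into a connector configuration looks roughly like the following fragment; the processor name reselector is an arbitrary label, and the rest of the connector configuration is omitted:

    {
      "post.processors": "reselector",
      "reselector.type": "io.debezium.processors.reselect.ReselectColumnsPostProcessor"
    }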

    Changes to SQL Server connector

    Signal and notification MBean name changes

    The JMX signaling and notifications for SQL Server did not work correctly when a connector was configured with multiple databases, spawning multiple tasks. To resolve this issue, it was necessary to change the signaling and notification MBean names to make sure they are unique per task (DBZ-8137).

    Changes to JDBC sink connector

    Relocation of JDBC sink repository

    The JDBC sink repository has been relocated from debezium-connector-jdbc to the main debezium repository (DBZ-8008). With the introduction of the MongoDB sink connector in Debezium 3, this allows the team to easily share common contracts across our sink connectors.

    Moving forward, to raise pull requests for the JDBC sink, please use the main Debezium repository, as the old repository has been archived and is now read-only.

    JDBC retry flushes on specific failures

    The JDBC sink uses a set of buffers to improve write throughput to the target database. In some use cases, the flush operation for these buffers may fail with specific exceptions because other applications hold locks on a specific row or table. To improve the user experience, two new configuration properties have been added:

    flush.failure.max.retries

    Defines the number of retries when a flush failure occurs.

    flush.failure.retries.wait.ms

    Defines the number of milliseconds to wait between retries.

    The retry feature is enabled by default, retrying up to a maximum of 5 attempts with a 1-second delay between retries. To disable retries, set flush.failure.max.retries to 0 (DBZ-7291).

    Changes to Debezium Server

    Breaking changes

    Debezium Server Kafka Sink

    The Debezium Server Kafka sink adapter could wait indefinitely when a Kafka broker becomes unavailable. A new configurable timeout has been added to the sink adapter to force the adapter to fail when the timeout is reached. The new option, debezium.sink.kafka.wait.message.delivery.timeout.ms, has a default value of 30 seconds. Please adjust this accordingly if the default is insufficient for your needs (DBZ-7575).

    Debezium Server RabbitMQ sink

    The Debezium Server RabbitMQ sink adapter was sending all changes to the same single stream. While this may be useful in some scenarios, it does not align well with other broker systems, where each table is streamed to its own unique topic or stream. With Debezium 3, this logic has changed and each table will be streamed to its own unique stream by default. By setting debezium.sink.rabbitmqstream.stream, you can re-enable the legacy behavior of streaming all changes to the same stream (DBZ-8118).

    Support custom converter types

    In prior releases of Debezium Server, there were a finite number of converters that could be used for headers, keys, and values: Json, JsonByteArray, CloudEvents, Avro, Protobuf, Binary, and SimpleString. While these satisfy the vast majority of use cases, it’s not uncommon for someone to have a unique requirement, specific to their environment, that falls outside these options.

    In this release, a new ClientProvided converter option has been added, which allows for extending the header, key, and value converters with a custom, user-supplied implementation (DBZ-8040).

    Improved logging for Kafka sink

    The Kafka sink adapter will now log the record key when Debezium fails to send the record to the Kafka broker. This makes it easy to identify which specific record was the problem without necessarily needing to increase the logging verbosity of the runtime (DBZ-8282).

    Changes to Spanner connector

    Support for 32-bit floats

    The Google Spanner database introduced support for a 32-bit float data type. The Debezium Google Spanner connector has been adjusted to support this new data type (DBZ-8043).

    Changes to Vitess connector

    Empty shard support

    In Vitess, it is possible for a keyspace to have shards with no tablets. The Debezium Vitess connector now handles such a keyspace gracefully, without faulting (DBZ-8053).

    Inherit shard epoch

    A new Vitess connector configuration property has been added to control whether the epochs of a new shard, created by a re-shard operation, are inherited from its parent shard. This new configuration property, vitess.inherit.epoch, defaults to false (DBZ-8163).
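
    If you do want the new behavior, it is enabled per connector; a minimal, hypothetical fragment of a Vitess connector configuration might look like this, with all other required settings omitted:

    {
      "connector.class": "io.debezium.connector.vitess.VitessConnector",
      "vitess.inherit.epoch": "true"
    }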

    Other fixes & improvements

    There were many bugfixes, stability changes, and improvements throughout the development of Debezium 3.0. Altogether, a total of 202 issues were fixed for this release.

    A big thank you to all the contributors from the community who worked on this major release: Jordan Pittier, Kanthi Subramanian, Katerina Galieva, Kaustuv chakrabarti, Keri Harris, Kevin Rothenberger, Kosta Kostelnik, Lars M. Johansson, Liz Chatman, Lokesh Sanapalli, Lourens Naudé, Luca Scannapieco, M. Gökhan Akgül, Maithem, Marcelo Avancini, Mario Fiore Vitale, Mark Banierink, Mark Bereznitsky, Mark Ducommun, Mark Lambert, Martin Medek, Massimo Fortunat, Matt Vance, Mehmet Firat Komurcu, Michal Augustýn, Michal Pioun, Mickael Maison, Miguel Angel Sotomayor, Mike Kamornikov, Minh Son Nguyen, Mohamed El Shaer, Mostafa Ghadimi, My Lang Pangzi, Nancy Xu, Nick Golubev, Nikhil Benesch, Nir Levy, Olivier Boudet, Ondrej Babec, Oren Elias, Paul Cheung, Pengwei Dou, Peter Hamer, Piotr Piastucki, Plugaru Tudor, Poonam Meghnani, Pradeep Nain, Praveen Burgu, RJ Nowling, Rafael Câmara, Rajendra Dangwal, Raúl Estrada, René Kerner, Richard Harrington, Robert Roldan, Robin Moffatt, Roman Kudryashov, Ronak Jain, Russell Mora, Ryan van Huuksloot, Sahap Asci, Sean C. Sullivan, Sean Wu, Sebastiaan Knijnenburg, Selman Genç, Seo Jae-kwon, Seongjoon Jeong, Sergei Kazakov, Sergei Morozov, Sergey Eizner, Sergey Ivanov, Stavros Champilomatis, Stefan Miklosovic, Stein Rolevink, Stephen Clarkson, Subodh Kant Chaturvedi, Sun Xiao Jian, Sylvain Marty, Thomas Thornton, Théophile Helleboid, Tiernay, Tim Loes, Timo Wilhelm, Tomasz Gawęda, Tommy Karlsson, Hossein Torabi, Tudor Plugaru, V K, Vadzim Ramanenka, Vaibhav Kushwaha, Vincenzo Santonastaso, Vincenzo Santonastaso, Vojtěch Juránek, Wu Zhenhua, Xianming Zhou, Xiaojian Sun, Xinbin Huang, Xuan Shen, Yang Wu, Yanjie Wang, Yashashree Chopada, Yohei Yoshimuta, Zheng Wang, Zhongqiang Gong, baabgai, david remy, einar-rt, ibnubay, ismail simsek, leoloel, martin, ming luo, moyq5, ruslan, sean, tison, tony joseph, tooptoop4, yohei yoshimuta, حمود سمبول, and 蔡灿材!

    What’s next?

    For the remainder of the 2024 calendar year, the team will continue to deliver maintenance and bugfix releases for Debezium 3. These will continue at our normal cadence of approximately every 2-3 weeks, depending on the issues reported and fixed. One change this quarter, however, is that there won’t be a minor release in December as in years past. This decision allows the team to focus on a number of low-hanging-fruit activities that aren’t directly tied to development tasks.

    In addition, as we get closer to the end of the year, we’ll be putting out our vision of the 2025 roadmap and what that includes. Now is a perfect time to think about what you’d like to see on that roadmap and get involved. You can always reach us on our mailing list or in our Zulip chat!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas about how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file +}

    \ No newline at end of file diff --git a/blog/2024/10/03/plan-to-move-debezium-to-foundation/index.html b/blog/2024/10/03/plan-to-move-debezium-to-foundation/index.html index 511282abcb..2f1be799e1 100644 --- a/blog/2024/10/03/plan-to-move-debezium-to-foundation/index.html +++ b/blog/2024/10/03/plan-to-move-debezium-to-foundation/index.html @@ -1 +1 @@ - Moving Debezium to a Foundation

    tl;dr

    We are considering moving Debezium to a Software Foundation to expand our community, become more open and transparent in our roadmap and decisions, and encourage multi-vendor participation and execution.

    Introduction

    Since its inception in late 2015, Debezium has strived to be the leading open-source platform for Change Data Capture. A project that started focusing on two connectors has now grown into a portfolio of connectors for nearly a dozen different database vendors.

    Over the past several years, there has been rapid growth in multiple areas. We’ve introduced sink connectors to our portfolio to improve the user experience and offer solutions for full end-to-end Change Data Capture pipelines. We have introduced supplementary components such as the Debezium Operator and Debezium Server, providing alternative deployment options rather than just Kafka Connect as the only runtime.

    But beyond that, the team has grown in other ways, too. Not only have we had the pleasure of expanding our committer team, but we’ve also enlisted nearly a half-dozen volunteers from the community who help contribute, maintain, and lead our community-led connector portfolio for Google Spanner, Vitess, Informix, and Cassandra.

    The project’s overall success would not have been possible without the passion, dedication, and collaboration of our volunteers, contributors, committers, and the extraordinary and vibrant user community.

    Why move to a Foundation?

    For Debezium to remain the leading open-source platform for Change Data Capture, we must consider long-term goals to enable the project to adapt to a fast-paced, highly innovative landscape.

    Our goals are simple:

    • Continue to grow our community

    • Increase the adoption and awareness of Debezium

    • Embrace transparency in our roadmap and decision-making

    • Encourage multi-vendor participation and execution

    We believe that moving Debezium to a Foundation will help drive these goals more organically, benefiting everyone involved.

    Support and Alignment with Red Hat Values

    Red Hat is dedicated to fostering a vendor-neutral culture for collaboration on Debezium, just as it has for other projects such as the Linux kernel, Kubernetes, OpenJDK, and Quarkus.

    Community Feedback

    So far, all preliminary discussions about this change have been positive. But we want to make sure that we make a decision that provides a net benefit, so we’ve created a set of criteria to evaluate foundations, which includes:

    • Remain visible, relevant, and recognizable in a foundation’s portfolio.

    • Maintain our current release processes without extra processes or steps.

    • Make independent, self-informed decisions rather than having decisions imposed on us.

    • License flexibility to continue using a wide array of third-party libraries.

    • Flexibility to create sub-projects or use the Debezium brand, focusing on new areas of interest or proofs of concepts that could expand the project’s portfolio.

    We invite you, the broader Debezium community, to get involved in the conversation. You can share your concerns and constructive feedback on our mailing list or Zulip chat. This feedback is crucial in helping us decide on the best home for Debezium.

    Let’s guarantee that Debezium continues to grow, thrive, and remain the leader in Change Data Capture together!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

Moving Debezium to a Foundation

    tl;dr

    We are considering moving Debezium to a Software Foundation to expand our community, become more open and transparent in our roadmap and decisions, and encourage multi-vendor participation and execution.

    Introduction

    Since its inception in late 2015, Debezium has strived to be the leading open-source platform for Change Data Capture. A project that started focusing on two connectors has now grown into a portfolio of connectors for nearly a dozen different database vendors.

Over the past several years, there has been rapid growth in multiple areas. We’ve introduced sink connectors to our portfolio to improve the user experience and offer solutions for full end-to-end Change Data Capture pipelines. We have also introduced supplementary components such as the Debezium Operator and Debezium Server, providing alternative deployment options so that Kafka Connect is no longer the only runtime.

    But beyond that, the team has grown in other ways, too. Not only have we had the pleasure of expanding our committer team, but we’ve also enlisted nearly a half-dozen volunteers from the community who help contribute, maintain, and lead our community-led connector portfolio for Google Spanner, Vitess, Informix, and Cassandra.

    The project’s overall success would not have been possible without the passion, dedication, and collaboration of our volunteers, contributors, committers, and the extraordinary and vibrant user community.

    Why move to a Foundation?

    For Debezium to remain the leading open-source platform for Change Data Capture, we must consider long-term goals to enable the project to adapt to a fast-paced, highly innovative landscape.

    Our goals are simple:

    • Continue to grow our community

    • Increase the adoption and awareness of Debezium

    • Embrace transparency in our roadmap and decision-making

    • Encourage multi-vendor participation and execution

    We believe that moving Debezium to a Foundation will help drive these goals more organically, benefiting everyone involved.

    Support and Alignment with Red Hat Values

    Red Hat is dedicated to fostering a vendor-neutral culture for collaboration on Debezium, like other projects such as the Linux kernel, Kubernetes, OpenJDK, and Quarkus.

    Community Feedback

    So far, all preliminary discussions about this change have been positive. But we want to make sure that we make a decision that provides a net benefit, so we’ve created a set of criteria to evaluate foundations, which includes:

    • Remain visible, relevant, and recognizable in a foundation’s portfolio.

• Maintain our current release process without additional steps or overhead.

• Make independent, self-informed decisions rather than having decisions imposed on us.

    • License flexibility to continue using a wide array of third-party libraries.

• Flexibility to create sub-projects or use the Debezium brand, focusing on new areas of interest or proofs of concept that could expand the project’s portfolio.

We invite you, the broader Debezium community, to get involved in the conversation. You can share your concerns and constructive feedback on our mailing list or Zulip chat. This feedback is crucial for us to decide the best home for Debezium.

    Let’s guarantee that Debezium continues to grow, thrive, and remain the leader in Change Data Capture together!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/10/04/debezium-community-stores-with-lars-johansson/index.html b/blog/2024/10/04/debezium-community-stores-with-lars-johansson/index.html

Debezium Community Stories With... Lars M Johansson

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    Lars, could you introduce yourself? What is your job, if you’re not contributing to Debezium?

Hi, my name is Lars Johansson (which is a fairly common name in Sweden, hence the middle initial 'M' for Magnus), and I’m an IT architect and consultant, running my own business since the beginning of this year. I’ve been with a number of consulting firms and some "proper" tech companies over the years, but I have almost exclusively kept to the Unix/Linux, Java, and Open Source side of the industry. I have worked on various types of business systems, with clients ranging from banking, through retail and telecom, to life sciences and the public sector.

These past few years I’ve had an assignment with the Swedish Migration Agency within their Digital Transformation programme (funded by the European Union). Part of that transformation has been the introduction of Event Driven Architecture and Apache Kafka, and, with that, Change Data Capture through Kafka Connect.

    What are your use cases for Debezium and change data capture in your current project?

    We have two main use cases for Debezium and CDC.

    The first, and not quite as challenging as the second, is implementing the Transactional Outbox Pattern to produce business events from existing, actively developed applications to Kafka. We do that with the Outbox Event Router and corresponding connector against mostly PostgreSQL databases.

    The second, more challenging use case is producing business events from an older legacy system based on IBM Informix. This is part of a transition strategy to decouple new applications and services from the old legacy system by exposing relevant state changes as business events in the new event streaming platform.

When we started this journey, there was no publicly available connector plugin for Informix. The reason for that can probably be summed up in Gunnar’s comment when I asked him about it during an "Ask the Experts" session at Kafka Summit:

> Gunnar: I think this is the third time I've heard that question.
> Lars: Today?
> Gunnar: No, in total.

So we started writing our own, and it was a journey of learning. The more we learnt about the internals of the Informix database, its change streaming APIs, and Kafka Connect, the more we realised that maybe we had bitten off more than we could chew.

Also, as the work with transactional outboxes progressed, we came to realise that we would eventually want to implement that pattern for Informix as well, and utilising the Outbox Event Router would be a great benefit. So, we started looking at porting our work to the Debezium platform.

    Actually, we weren’t the only ones looking at building a Debezium connector for Informix. We found an embryo of a connector at Debezium Connector for Informix. It was based on Debezium 1.5, so we set out to bring it up to 2.x standard and merge our work into it.

    Simultaneously there was an initiative at the agency regarding how to deal with contributions to Open Source. The agency’s work is financed by public funds, from the Swedish government and from the European Union, and there are strategies and policies on national as well as European levels that promote contributing back to Open Source projects. As a result, a policy was drafted and adopted that allowed us to contribute our work back to the Debezium community, and the Debezium Connector for Informix was released to the public with v2.5.0.Alpha1.

    This sounds really interesting; can you tell us more about the challenges you encountered and how you solved them?

Although superficially similar, all databases do things slightly differently under the hood, even more so when it comes to change data capture APIs. The publicly available documentation on Informix and its CDC API also does not go into much depth; not all features are documented, and some documented features are not implemented. So just figuring out all the quirks and handling the edge cases took considerable effort, for instance mapping the different data types and working out which data types are supported in the CDC API.

But the greatest challenge was without doubt getting all restart and recovery cases to work securely and consistently. In the end it was mostly a matter of grit: doubling down on integration tests, making sure all conceivable cases were covered, and then code, test, refactor. Then test, test, and test again… And this is probably one of the biggest benefits of porting our work to Debezium: the testing framework and the extensive test suites of the existing modules to look to for inspiration and ideas.

Another challenge, which has more to do with Open Source than with CDC, is that we are developing to solve our own use cases. But other people have other use cases, and they break the plugin in (for us) entirely unforeseen ways. That was frustrating to begin with, before we got used to it, but it is part of the beauty of Open Source and what makes us all better in the end.

    As a contributor to a community-led Debezium connector, how was the experience?

It’s been an overall smooth experience. Once the initiative to publish our work back to the community was approved by the Agency, we started setting up the repository and integrating with the CI tools. We got good help from you, Chris, and from Jiri Pechanec, and the contribution guide was also helpful. Despite the Informix community not being particularly vibrant, we did get some attention and feedback, bug reports, and suggested improvements quite early on, and we have been steadily improving.

    Are you doing other open-source work, too?

    I have dabbled a bit here and there but this is my first proper contribution to an open-source project. Certainly the first to get any sort of attention.

Are there features you believe are missing in Debezium that you’d like to see in the future?

    Compared to writing your own connector from scratch, Debezium is really a Swiss Army knife of connector development. But if I have to suggest something, it would be common support for distributed cache off-load of very large transactions, since that is a problem we have actually run across in Informix…

    Lars, thanks a lot for taking your time, it was a pleasure to have you here!

If you’d like to stay in touch with Lars M Johansson and discuss with him, please drop a comment below or follow and reach out to him on GitHub.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/10/14/Detect-data-mutation-patterns-with-Debezium/index.html b/blog/2024/10/14/Detect-data-mutation-patterns-with-Debezium/index.html

Detect data mutation patterns with Debezium

  debezium_metrics_create_events_count{context="streaming",name="monitoring",plugin="postgres",table="inventory.orders",} 100.0

    Components start up

    After that you are ready to start the demo with the following steps:

    1. Build our order service.

      order-service/mvnw package -f order-service/pom.xml
2. Run our compose file to start everything that is needed.

      export DEBEZIUM_VERSION=3.0.0.Final
       docker-compose up -d --build
3. When all services are up and running, we can register our connector (a sketch of an example payload follows below):

      curl -i -X POST -H "Accept:application/json" -H  "Content-Type:application/json" http://localhost:8083/connectors/ -d @postgres-activity-monitoring.json
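
    The contents of postgres-activity-monitoring.json are not shown in this excerpt. Purely as a hypothetical sketch, a registration payload for a Debezium PostgreSQL connector of this kind could look roughly like the following, reusing the connector name and table suggested by the metric shown earlier; hostname, credentials, and topic prefix are placeholder values:

      {
        "name": "monitoring",
        "config": {
          "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
          "database.hostname": "postgres",
          "database.port": "5432",
          "database.user": "postgres",
          "database.password": "postgres",
          "database.dbname": "postgres",
          "topic.prefix": "monitoring",
          "table.include.list": "inventory.orders"
        }
      }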

    Accessing the dashboard

Open a web browser and go to the Grafana UI at http://localhost:3000. Log in to the console as user admin with password admin. When asked, either change the password or skip this step.

Then, to monitor the order service activity, we have created the General/Microservices activity monitoring dashboard.

After a couple of minutes, you should see that the order rate is ~10 orders per second.

To simulate a drop, we can just update the APP_VERSION environment variable to a value different from 1.0.

docker stop order-service
docker rm -f order-service && \
docker compose run -d -e APP_VERSION=1.1 --name order-service order-service

After a while, you will see that the service starts creating orders at a roughly 50% lower rate (see Figure 1).

Since we have also configured an alert to fire when the order rate drops below 7, you can check that it is firing in the alert panel.
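
    The demo’s alert definition is not shown in this excerpt. Assuming a Prometheus-style data source, one plausible way to express an "order rate below 7" condition over the create-events counter shown earlier would be a rule along these lines (a sketch, not the demo’s actual rule):

      rate(debezium_metrics_create_events_count{table="inventory.orders"}[1m]) < 7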

Figure 1. Activity monitoring dashboard

But that’s not all: we have also configured a mail notification, which you can check by accessing the Fake SMTP UI at http://localhost:8085.

    Conclusion

    We have seen how Change Data Capture (CDC) can extract insights from the database to serve as key performance indicators (KPIs) in the reliability and observability of microservices. This approach allows us to avoid modifying our service to expose these metrics and instead rely on Debezium for data collection.

    While not all business metrics can be derived from database operations, a significant portion can be.

    Any comments, suggestions, or questions are welcome, so please feel free to reach out to me to discuss further.

    Fiore Mario Vitale

    Mario is a Senior Software Engineer at Red Hat. He lives in Italy.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/10/28/community-feedback-survey-2024/index.html b/blog/2024/10/28/community-feedback-survey-2024/index.html

Community Feedback Survey 2024

    The Debezium project is conducting our 2024 Community Feedback survey, and we want to hear from YOU!

The survey provides the team with valuable feedback about your past and present Debezium experiences, including what your future needs and goals are around change data capture. This feedback is vital to shape the future roadmap of the project and gives the team direction on the priorities to deliver on the community’s needs. The survey will only require a few short minutes of your time and is anonymous, so please jump right in and get started now!

    The survey is open now and will remain open through the end of November.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/10/28/debezium-3-0-1-final-released/index.html b/blog/2024/10/28/debezium-3-0-1-final-released/index.html

Debezium 3.0.1.Final Released

  <dependency>
      <groupId>io.debezium</groupId>
      <artifactId>debezium-connector-cassandra-5</artifactId>
      <version>3.0.1.Final</version>
  </dependency>

    In addition, the new Cassandra 5 connector is included with Debezium Server.

    MySQL 9.1 support

Oracle released MySQL 9.1 just a few short weeks ago, and we’re happy to report that Debezium now supports MySQL 9.1 (DBZ-8424). This update also bumps the MySQL JDBC driver to version 9.1.0.

    For existing users, this should require no additional changes.

    PostgreSQL 17 support

    The PostgreSQL team released the latest PostgreSQL 17 database at the end of September 2024, and we’re happy to report that Debezium officially supports PostgreSQL 17 (DBZ-8275). For any existing users, this update should require no additional changes.

    Using YAML with Debezium Server

    Debezium Server is a standalone environment for running Debezium connectors outside of a Kafka Connect environment on bare metal, VMs, or Kubernetes. Debezium Server users are familiar with the configuration property file called application.properties, where you supply the source and sink connector configurations to define your Debezium Server pipeline.

    We are excited to share that we’ve added support for YAML when using Debezium Server, allowing you to now define your configuration in application.yml (DBZ-8313). Please review the Quarkus documentation on using YAML configuration should you have any questions.
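
    As an illustration only, a minimal application.yml for a PostgreSQL-to-Kafka pipeline might look something like the sketch below, with the usual flat Debezium Server property names (for example debezium.source.connector.class) expressed as nested YAML keys; hostnames and credentials are placeholders, and the exact key syntax follows the Quarkus YAML configuration rules referenced above:

      debezium:
        source:
          connector:
            class: io.debezium.connector.postgresql.PostgresConnector
          database:
            hostname: postgres
            port: 5432
            user: postgres
            password: postgres
            dbname: inventory
          topic:
            prefix: tutorial
        sink:
          type: kafka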

    Other fixes

    In total there were 35 issues resolved in Debezium 3.0.1.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Oracle DDL parsing will fail if the DDL ends with a new line character DBZ-7040

    • Missing documentation for MongoDb SSL configuration DBZ-7927

    • Conditionalization implemented for single-sourcing MySQL/MariaDB content isn’t working as expected DBZ-8094

    • Support batch write to AWS Kinesis DBZ-8193

    • Debezium is replaying all events from an older offset DBZ-8194

    • Embedded MySqlConnector "Unable to find minimal snapshot lock mode" since 2.5.4.Final DBZ-8271

    • Reselect Post Processor not working when pkey of type uuid etc. DBZ-8277

    • BinlogStreamingChangeEventSource totalRecordCounter is never updated DBZ-8290

    • Restart Oracle connector when ORA-01001 invalid cursor exception is thrown DBZ-8292

    • Connector uses incorrect partition names when creating offsets DBZ-8298

    • ReselectPostProcessor fails when reselecting columns from Oracle DBZ-8304

    • Debezium MySQL DDL parser: SECONDARY_ENGINE=RAPID does not support DBZ-8305

    • SQL Server Documentation for CDC on Server table DBZ-8314

    • Oracle DDL failure - subpartition list clause does not support in-memory clause DBZ-8315

    • DDL statement couldn’t be parsed DBZ-8316

• Binary Log Client doesn’t process the TRANSACTION_PAYLOAD header DBZ-8340

    • Oracle connector: archive.log.only.mode stop working after reach SYSDATE SCN DBZ-8345

    A big thank you to all the contributors from the community who worked diligently on this release: Anisha Mohanty, Kunal Bhatnagar, Chris Cranford, Dongwook Chan, Franz Emberger, Gaurav Miglani, Jiri Pechanec, Jonas Thelemann, Katerina Galieva, Mario Fiore Vitale, Nathan Smit, Ondrej Babec, Philippe Labat, Robert Roldan, Stefan Miklosovic, and Vojtech Juranek!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/11/04/debezium-moving-to-commonhaus/index.html b/blog/2024/11/04/debezium-moving-to-commonhaus/index.html

Moving Debezium to the Commonhaus Foundation

    After consulting with the community both inside and outside of Red Hat, we have made the decision to submit a request for the Debezium project to join the Commonhaus Foundation.

Today, we are thrilled to announce that the Debezium project is starting the transition to the Commonhaus Foundation. Debezium is the leading open-source distributed platform for Change Data Capture (CDC), and has been sponsored exclusively by Red Hat since its inception back in 2015. The move to Commonhaus aligns with our goal to foster a more inclusive, collaborative environment.

    Why Move to a Foundation?

    Debezium has seen exceptional growth and adoption since 2015. Many data streaming providers rely on a variety of foundational components, and for CDC, that most often means Debezium. We want to continue this momentum, and we believe moving to a foundation helps to facilitate that goal.

    For more details, see our goals and reasons for moving to a foundation.

    Why Commonhaus?

    Commonhaus stands out because of its innovative governance framework and commitment to project independence. This benefits the Debezium community and its collaborators by allowing us to continue to provide the same release cadence and commitment to excellence that we have today. We are thrilled to join other prominent projects at Commonhaus, which includes Hibernate, Jackson, and Quarkus.

    What’s Next?

    We are committed to keeping Debezium the open-source leader in the Change Data Capture (CDC) space. We remain ever vigilant in our pursuit to innovate, evolve, and deliver the best experience for our vibrant user community. This transition will enable us to welcome a wider array of contributions from a more diverse cross-section of developers and organizations.

The move to a foundation isn’t just about Debezium; it’s equally about the community. If your organization wishes to ensure that Debezium evolves and grows in a direction that benefits it, get involved. Join us in helping Debezium transition to the Commonhaus Foundation.

    The Commonhaus Foundation is currently bootstrapping, enabling it to be shaped to help you and open-source projects like Debezium have a place to call home. For questions about this move, please refer to our dedicated FAQ or reach out through our usual channels.

    On behalf of the Debezium team.

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/11/18/debezium-3-0-2-final-released/index.html b/blog/2024/11/18/debezium-3-0-2-final-released/index.html

Debezium 3.0.2.Final Released

    database.password: ${POSTGRES_PASSWORD}
    database.dbname: ${POSTGRES_DB}
    topic.prefix: inventory
    schema.include.list: inventory

By default, the Debezium Server API endpoint is disabled, but it can be enabled by setting spec.runtime.api.enabled to true, as shown above.
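
    Because only a fragment of the custom resource is visible in this excerpt, the following is a minimal, hypothetical sketch of a DebeziumServer resource with the API endpoint enabled; the apiVersion, metadata, and the exact placement of the source properties under spec.source.config are assumptions based on typical Debezium Operator examples rather than a copy of the resource shown above:

      apiVersion: debezium.io/v1alpha1
      kind: DebeziumServer
      metadata:
        name: my-debezium
      spec:
        runtime:
          api:
            enabled: true
        source:
          class: io.debezium.connector.postgresql.PostgresConnector
          config:
            database.hostname: postgres
            database.user: ${POSTGRES_USER}
            database.password: ${POSTGRES_PASSWORD}
            database.dbname: ${POSTGRES_DB}
            topic.prefix: inventory
            schema.include.list: inventory
        sink:
          type: kafka
          config:
            producer.bootstrap.servers: kafka:9092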

    Other fixes

    In total there were 46 issues resolved in Debezium 3.0.2.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Clarify signal data collection should be unique per connector DBZ-6837

    • Race condition in stop-snapshot signal DBZ-8303

    • Debezium shifts binlog offset despite RabbitMQ Timeout and unconfirmed messages DBZ-8307

    • Use DebeziumSinkRecord instead of Kafka Connect’s SinkRecord inside Debezium sink connectors DBZ-8346

    • Implement new config map offset store in DS DBZ-8351

    • Debezium server with eventhubs sink type and eventhubs emulator connection string fails DBZ-8357

    • Filter for snapshot using signal doesn’t seem to work DBZ-8358

    • JDBC storage module does not use quay.io images DBZ-8362

    • Failure on offset store call to configure/start is logged at DEBUG level DBZ-8364

    • Object name is not in the list of S3 schema history fields DBZ-8366

    • Faulty "Failed to load mandatory config" error message DBZ-8367

    • Upgrade protobuf dependencies to avoid potential vulnerability DBZ-8371

    • Add transform page to provide a single place to list the already configured transform plus UI to add a new transform DBZ-8374

    • Upgrade Kafka to 3.8.1 DBZ-8385

    • Tests in IncrementalSnapshotIT may fail randomly DBZ-8386

    • Add Transform Edit and delete support. DBZ-8388

    • Log SCN existence check may throw ORA-01291 if a recent checkpoint occurred DBZ-8389

    • ExtractNewRecordState transform: NPE when processing non-envelope records DBZ-8393

    • Oracle LogMiner metric OldestScnAgeInMilliseconds can be negative DBZ-8395

    • SqlServerConnectorIT.restartInTheMiddleOfTxAfterCompletedTx fails randomly DBZ-8396

    • ExtractNewDocumentStateTestIT fails randomly DBZ-8397

    • BlockingSnapshotIT fails on Oracle DBZ-8398

    • Oracle OBJECT_ID lookup and cause high CPU and latency in Hybrid mining mode DBZ-8399

    • Upgrade Kafka to 3.9.0 DBZ-8400

    • Protobuf plugin does not compile for PostgreSQL 17 on Debian DBZ-8403

    • Update Quarkus Outbox Extension to Quarkus 3.16.3 DBZ-8409

    A big thank you to all the contributors from the community who worked diligently on this release: Anisha Mohanty, dario, Chris Cranford, Enzo Cappa, Jakub Cechacek, Jiri Pechanec, Kavya Ramaiah, Lars M. Johansson, Mario Fiore Vitale, Martin Vlk, P. Aum, René Kerner, Stanislav Deviatov, Stefan Miklosovic, Thomas Thornton, Vojtech Juranek, and Yevhenii Lopatenko!

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

diff --git a/blog/2024/12/18/debezium-3-0-5-final-released/index.html b/blog/2024/12/18/debezium-3-0-5-final-released/index.html

Debezium 3.0.5.Final Released

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    Breaking Changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Kafka Signal Source

    Debezium was reprocessing Kafka-based signals on connector restarts, which could introduce unpredictable behavior with unintended side effects. As a result, this feature has been removed, and if a connector stops, then the signal must be re-sent (DBZ-7856).
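
    Because Kafka signals are no longer replayed after a restart, a signal that was in flight when the connector stopped simply has to be sent again. Below is a minimal sketch of re-sending an incremental snapshot signal with the plain Kafka producer API; the signal topic name, topic.prefix value, and table name are placeholders for your own setup, and the message format follows the documented Kafka signal channel (key equal to topic.prefix, JSON value with type and data fields).

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class ResendSignal {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

            // Placeholders: the key must match the connector's topic.prefix.
            String signalTopic = "dbz-signals";
            String topicPrefix = "myserver";
            String value = "{\"type\":\"execute-snapshot\","
                    + "\"data\":{\"data-collections\":[\"inventory.orders\"],\"type\":\"incremental\"}}";

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                // Send the signal again; it will not be replayed automatically after restarts.
                producer.send(new ProducerRecord<>(signalTopic, topicPrefix, value)).get();
            }
        }
    }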

    Change Event Source Info Block

    The snapshot enumeration in the source information block in Debezium’s change events was extended to include all possible cases (DBZ-8496).

    New features and improvements

    Debezium 3.0.5.Final introduces a couple of improvements and features; let’s take a look at each individually.

    Core

    Allow ad-hoc blocking snapshots on unknown tables

    While we recommend using the default configurations for schema management for all tables, we understand that isn’t always possible for every environment. A common question we see is how to snapshot newly added tables, particularly when the connector is configured with store.only.captured.tables.ddl set to true.

    In this update, we have simplified the process: update your connector configuration with the new table, and once the connector task has restarted, an ad-hoc blocking snapshot signal is permitted even for tables that are unknown to the connector but match the capture filters (DBZ-4903).
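
    As a concrete illustration, the sketch below sends such an ad-hoc blocking snapshot signal for a newly added table through the signaling table using plain JDBC. The JDBC URL, credentials, signaling table name (debezium_signal), and the table name (inventory.new_orders) are placeholders; the payload follows the documented execute-snapshot signal format with type set to blocking.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.util.UUID;

    public class SendBlockingSnapshotSignal {
        public static void main(String[] args) throws Exception {
            // Placeholders - point these at the database the connector captures from.
            String url = "jdbc:postgresql://localhost:5432/inventory";
            try (Connection conn = DriverManager.getConnection(url, "user", "password");
                 PreparedStatement ps = conn.prepareStatement(
                         "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                ps.setString(1, UUID.randomUUID().toString());
                ps.setString(2, "execute-snapshot");
                // "blocking" requests an ad-hoc blocking snapshot of the listed tables.
                ps.setString(3, "{\"data-collections\": [\"inventory.new_orders\"], \"type\": \"blocking\"}");
                ps.executeUpdate();
            }
        }
    }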

    Snapshot dispatch failure handling improvements

    When processing change events, users can control how the connector reacts to specific event handling failures using event.processing.failure.handling.mode; however, this has traditionally only been applicable for streaming changes.

    There are some corner cases where emitting snapshot events could also raise failures. To improve this experience, the event.processing.failure.handling.mode now influences failures when dispatching snapshot events, too (DBZ-8433).
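
    For reference, here is a minimal configuration sketch expressed as Java properties; the connector class and topic prefix are illustrative, and the mode values shown are the usual fail / warn / skip options.

    import java.util.Properties;

    public class FailureHandlingConfig {
        public static Properties connectorConfig() {
            Properties config = new Properties();
            // Illustrative connector; the property applies to Debezium source connectors generally.
            config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            config.setProperty("topic.prefix", "myserver");
            // fail | warn | skip - as of 3.0.5 this also governs failures raised while
            // dispatching snapshot events, not only streamed changes (DBZ-8433).
            config.setProperty("event.processing.failure.handling.mode", "warn");
            return config;
        }
    }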

    Connector startup configuration logging improved

    When a Debezium source connector starts, it logs all connector configurations. This is very helpful both for the team when diagnosing problems and for users to verify that their configuration is being interpreted as intended.

    In this update, the logging of the connector configuration has changed slightly. Rather than writing each configuration property as a separate log entry, the key/value pairs are now joined with new lines and written to the log as a single entry. This makes these log lines easier to identify and easier for tooling to exclude if needed (DBZ-8472).

    Postgres

    Support for PostgreSQL 17 fail-over replication slots

    PostgreSQL 17 introduces a long-awaited feature: fail-over replication slots.

    When the replication slot is created, a new property can be specified to enable the replication slot to be created and available on the fail-over replica. The synchronization of the fail-over replication slot can be performed manually by calling pg_sync_replication_slots() or automatically by enabling the sync_replication_slots feature.

    When automatic synchronization is enabled, this allows Debezium to consume from that slot immediately on fail-over to the replica and not miss any events.

    To take advantage of this new feature, your connector must be configured with slot.failover set to true. Debezium must be connected to PostgreSQL 17+ and to the primary of the cluster; otherwise, no fail-over replication slot will be created (DBZ-8412).
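
    A minimal configuration sketch is shown below; host names, credentials, and the slot name are placeholders, and the connector must point at the primary of a PostgreSQL 17+ cluster for the fail-over slot to be created.

    import java.util.Properties;

    public class FailoverSlotConfig {
        public static Properties connectorConfig() {
            Properties config = new Properties();
            config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            config.setProperty("database.hostname", "pg-primary.example.com"); // primary, PostgreSQL 17+
            config.setProperty("database.port", "5432");
            config.setProperty("database.user", "debezium");
            config.setProperty("database.password", "secret");
            config.setProperty("database.dbname", "inventory");
            config.setProperty("topic.prefix", "myserver");
            config.setProperty("slot.name", "debezium");
            // Request a fail-over replication slot so a promoted replica can take over (DBZ-8412).
            config.setProperty("slot.failover", "true");
            return config;
        }
    }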

    Oracle

    New metrics to track partial rollback events

    A partial rollback event is unique to Oracle: a user performs an operation, it is recorded in the transaction logs as performed, but a validation check then forces the operation to be undone and rolled back. One of the most common ways to trigger a partial rollback is a constraint violation.

    The new JMX metric, NumberOfPartialRollbackCount, tracks the frequency of this event sequence in the transaction logs (DBZ-8491).

    Increases in this metric should be rare, and a few occurrences throughout the day pose no major problem.

    If you notice this metric increasing frequently within a small window of time, you may have a poorly written script or job that relies on constraint violations for some of its logic. While the connector can handle these use cases, it’s important to understand that this creates unnecessary transaction log activity, which can directly impact the speed and latency of streaming changes.
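
    If you want to watch this metric, the sketch below reads it from the platform MBean server inside the Kafka Connect JVM; the ObjectName follows the usual Debezium Oracle streaming-metrics naming pattern, and the server name ("myserver", i.e. the connector’s topic.prefix) is a placeholder. For remote monitoring you would connect through a JMX connector instead.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class PartialRollbackMetric {
        public static long read() throws Exception {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            // Streaming metrics for the Oracle connector; "myserver" is the connector's topic.prefix.
            ObjectName name = new ObjectName(
                    "debezium.oracle:type=connector-metrics,context=streaming,server=myserver");
            Number count = (Number) mbs.getAttribute(name, "NumberOfPartialRollbackCount");
            return count.longValue();
        }
    }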

    Skip GoldenGate replication markers

    When using Debezium to capture changes from an Oracle database that interacts with Oracle GoldenGate, you may have observed some situations where the low watermark in the offsets did not advance across restarts. This was especially the case when setting lob.enabled to true.

    In this update, the Debezium Oracle connector will now skip GoldenGate’s replication marker events, which will prevent those synthetic transactions from cluttering the transaction buffer and forcing users to rely on transaction retention (DBZ-8533).

    Vitess

    Unparseable DDL handling improvements

    Several new improvements have been added to Vitess to make the parsing and handling of DDL changes more fault-tolerant (DBZ-8479). These improvements include:

    • Only parse DDL events if that specific table is tracked in the schema history

    • Strip comments to mimic parse failures

    • Make table and type handling fault-tolerant to gracefully handle edge cases

    K8s Operator

    Support JDBC offset/history configurations DBZ-8501

    We have improved the Kubernetes Operator by introducing support for the JDBC storage module, allowing it to be configured via the CRD.

    A new property section called jdbc has been added to both the offset schema reference and the schema history reference, describing the JDBC offset backing store properties and the JDBC schema history store properties, respectively.

    This makes it easy to store the offsets and schema history of a connector deployed with the Debezium Operator in a JDBC data store (DBZ-8501).

    Other fixes

    In total there were 43 issues resolved in Debezium 3.0.5.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Object ID cache may fail with concurrent modification exception DBZ-8465

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

    • Edit Source/Destination on adding new configuration properties its removing old once DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file + Debezium 3.0.5.Final Released

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    Breaking Changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Kafka Signal Source

    Debezium was reprocessing Kafka-based signals on connector restarts, which could introduce unpredictable behavior with unintended side effects. As a result, this feature has been removed, and if a connector stops, then the signal must be re-sent (DBZ-7856).

    Change Event Source Info Block

    The snapshot enumeration in the source information block in Debezium’s change events was extended to include all possible cases (DBZ-8496).

    New features and improvements

    Debezium 3.0.5.Final introduces a couple of improvements and features; let’s take a look at each individually.

    Core

    Allow ad-hoc blocking snapshots on unknown tables

    While we recommend using the default configurations for schema management for all tables, we understand that isn’t always possible for every environment. A common question we see is how to snapshot newly added tables, particularly when the connector is configured with store.only.captured.tables.ddl set to true.

    In this update, we have simplified the process: update your connector configuration with the new table, and once the connector task has restarted, an ad-hoc blocking snapshot signal is permitted even for tables that are unknown to the connector but match the capture filters (DBZ-4903).

    Snapshot dispatch failure handling improvements

    When processing change events, users can control how the connector reacts to specific event handling failures using event.processing.failure.handling.mode; however, this has traditionally only been applicable for streaming changes.

    There are some corner cases where emitting snapshot events could also raise failures. To improve this experience, the event.processing.failure.handling.mode now influences failures when dispatching snapshot events, too (DBZ-8433).

    Connector startup configuration logging improved

    When a Debezium source connector starts, it logs all connector configurations. This is very helpful both for the team when diagnosing problems and for users to verify that their configuration is being interpreted as intended.

    In this update, the logging of the connector configuration has changed slightly. Rather than writing each configuration property as a separate log entry, the key/value pairs are now joined with new lines and written to the log as a single entry. This makes these log lines easier to identify and easier for tooling to exclude if needed (DBZ-8472).

    Postgres

    Support for PostgreSQL 17 fail-over replication slots

    PostgreSQL 17 introduces a long-awaited feature: fail-over replication slots.

    When the replication slot is created, a new property can be specified to enable the replication slot to be created and available on the fail-over replica. The synchronization of the fail-over replication slot can be performed manually by calling pg_sync_replication_slots() or automatically by enabling the sync_replication_slots feature.

    When automatic synchronization is enabled, this allows Debezium to consume from that slot immediately on fail-over to the replica and not miss any events.

    To take advantage of this new feature, your connector must be configured with slot.failover set to true. Debezium must be connected to PostgreSQL 17+ and to the primary of the cluster; otherwise, no fail-over replication slot will be created (DBZ-8412).

    Oracle

    New metrics to track partial rollback events

    A partial rollback event is unique to Oracle: a user performs an operation, it is recorded in the transaction logs as performed, but a validation check then forces the operation to be undone and rolled back. One of the most common ways to trigger a partial rollback is a constraint violation.

    The new JMX metric, NumberOfPartialRollbackCount, tracks the frequency of this event sequence in the transaction logs (DBZ-8491).

    Increases in this metric should be rare, and a few occurrences throughout the day pose no major problem.

    If you notice this metric increasing frequently within a small window of time, you may have a poorly written script or job that relies on constraint violations for some of its logic. While the connector can handle these use cases, it’s important to understand that this creates unnecessary transaction log activity, which can directly impact the speed and latency of streaming changes.

    Skip GoldenGate replication markers

    When using Debezium to capture changes from an Oracle database that interacts with Oracle GoldenGate, you may have observed some situations where the low watermark in the offsets did not advance across restarts. This was especially the case when setting lob.enabled to true.

    In this update, the Debezium Oracle connector will now skip GoldenGate’s replication marker events, which will prevent those synthetic transactions from cluttering the transaction buffer and forcing users to rely on transaction retention (DBZ-8533).

    Vitess

    Unparseable DDL handling improvements

    Several new improvements have been added to Vitess to make the parsing and handling of DDL changes more fault-tolerant (DBZ-8479). These improvements include:

    • Only parse DDL events if that specific table is tracked in the schema history

    • Strip comments to mimic parse failures

    • Make table and type handling fault-tolerant to gracefully handle edge cases

    K8s Operator

    Support JDBC offset/history configurations DBZ-8501

    We have improved the Kubernetes Operator by introducing support for the JDBC storage module, allowing it to be configured via the CRD.

    A new property section called jdbc has been added to both the offset schema reference and the schema history reference, describing the JDBC offset backing store properties and the JDBC schema history store properties, respectively.

    This makes it easy to store the offsets and schema history of a connector deployed with the Debezium Operator in a JDBC data store (DBZ-8501).

    Other fixes

    In total there were 43 issues resolved in Debezium 3.0.5.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Object ID cache may fail with concurrent modification exception DBZ-8465

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

    • Edit Source/Destination on adding new configuration properties its removing old once DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    Chris Cranford

    Chris is a software engineer at Red Hat. He previously was a member of the Hibernate ORM team and now works on Debezium. He lives in North Carolina just a few hours from Red Hat towers.

       


    About Debezium

    Debezium is an open source distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Get involved

    We hope you find Debezium interesting and useful, and want to give it a try. Follow us on Twitter @debezium, chat with us on Zulip, or join our mailing list to talk with the community. All of the code is open source on GitHub, so build the code locally and help us improve our existing connectors and add even more connectors. If you find problems or have ideas for how we can improve Debezium, please let us know or log an issue.

    \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 3bcf1d63cf..b38b773a35 100644 --- a/blog/index.html +++ b/blog/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    After consulting with the community both inside and outside of Red Hat, we have made the decision to submit a request for the Debezium project to join the Commonhaus Foundation.

    The Debezium project is conducting our 2024 Community Feedback survey, and we want to hear from YOU!

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    After consulting with the community both inside and outside of Red Hat, we have made the decision to submit a request for the Debezium project to join the Commonhaus Foundation.

    The Debezium project is conducting our 2024 Community Feedback survey, and we want to hear from YOU!

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    \ No newline at end of file diff --git a/blog/page/10/index.html b/blog/page/10/index.html index 414597b144..1373c7e6b4 100644 --- a/blog/page/10/index.html +++ b/blog/page/10/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    In the previous blog post, we have shown how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored into the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning and to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to the changes in the data in real-time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As the streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    In the previous blog post, we have shown how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored into the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning and to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to the changes in the data in real-time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As the streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    \ No newline at end of file diff --git a/blog/page/11/index.html b/blog/page/11/index.html index 3f877a2584..12b9dd7f77 100644 --- a/blog/page/11/index.html +++ b/blog/page/11/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes New features Other changes Breaking changes MongoDB The MongoDB connector explicitly...

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes New features Other changes Breaking changes MongoDB The MongoDB connector explicitly...

    \ No newline at end of file diff --git a/blog/page/12/index.html b/blog/page/12/index.html index 92532648fb..ea7cbdaf26 100644 --- a/blog/page/12/index.html +++ b/blog/page/12/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event. However, in case of failures, restarts, or DB connection drops, the same event can be delivered more than once. The typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered, and at the same time there won’t be any duplicates; every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out-of-the-box, with only a little configuration change.

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event. However, in case of failures, restarts, or DB connection drops, the same event can be delivered more than once. The typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered, and at the same time there won’t be any duplicates; every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out-of-the-box, with only a little configuration change.

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    \ No newline at end of file diff --git a/blog/page/13/index.html b/blog/page/13/index.html index 837e174bd1..8ab48f84b9 100644 --- a/blog/page/13/index.html +++ b/blog/page/13/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    As you may have noticed, the Docker company recently announced a reduction of its free organization accounts offering. The Docker company wanted to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects. The Debezium project doesn’t meet their definition of an open source project, as we have a pathway to commercialization. As the accounts were to be terminated in 30 days, we immediately started to work on moving the Debezium project off Docker Hub.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    As you may have noticed, the Docker company recently announced a reduction of its free organization accounts offering. The Docker company wanted to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects. The Debezium project doesn’t meet their definition of an open source project, as we have a pathway to commercialization. As the accounts were to be terminated in 30 days, we immediately started to work on moving the Debezium project off Docker Hub.

    \ No newline at end of file diff --git a/blog/page/14/index.html b/blog/page/14/index.html index 36f0da27f5..c9a808bcf2 100644 --- a/blog/page/14/index.html +++ b/blog/page/14/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what it means moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Hi everyone, my name is Mario Fiore Vitale and I recently joined Red Hat and the Debezium team.

    I am a very curious person who follows a continuous learning approach; I like to keep growing my skills. I care about code quality and readability.

    I have 9+ years of experience and have worked for consultancy, startup, and enterprise product companies in different sectors. In my previous roles I had the chance to work on an architecture re-design project to split a monolith into a microservices application. During this time I gained experience with different technologies such as Kafka, Elasticsearch, Redis, Kubernetes, VictoriaMetrics, Spring Framework, and a bit of Cassandra.

    Why Am I here?

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what it means moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Hi everyone, my name is Mario Fiore Vitale and I recently joined Red Hat and the Debezium team.

    I am a very curious person who follows a continuous learning approach; I like to keep growing my skills. I care about code quality and readability.

    I have 9+ years of experience and have worked for consultancy, startup, and enterprise product companies in different sectors. In my previous roles I had the chance to work on an architecture re-design project to split a monolith into a microservices application. During this time I gained experience with different technologies such as Kafka, Elasticsearch, Redis, Kubernetes, VictoriaMetrics, Spring Framework, and a bit of Cassandra.

    Why Am I here?

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    \ No newline at end of file diff --git a/blog/page/15/index.html b/blog/page/15/index.html index fcdc1fcdca..e8ca01b637 100644 --- a/blog/page/15/index.html +++ b/blog/page/15/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database resulting in the creation of DDD Aggregates via Debezium & Kafka-Streams.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database resulting in the creation of DDD Aggregates via Debezium & Kafka-Streams.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    diff --git a/blog/page/16/index.html b/blog/page/16/index.html
    index 5357b9c556..23076d71c0 100644
    --- a/blog/page/16/index.html
    +++ b/blog/page/16/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    As you are probably well aware, Gunnar Morling has stepped down from his position as Debezium project lead and is now pursuing new exciting adventures. It is sad, but every cloud has a silver lining!

    What can it be? We (the Debezium team and Red Hat) are hiring! Are you a community contributor? Do you have any pull requests under your belt? Are you a happy Debezium user and eager to do more, or are you a seasoned Java developer looking for work in an exciting and inclusive open-source environment?

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    Some time in early 2017, I got a meeting invite from Debezium’s founder, Randall Hauch. He was about to begin a new chapter in his professional career and was looking for someone to take over as the project lead for Debezium. So we hopped on a call to talk things through, and I was immediately sold on the concept of change data capture, its large number of potential use cases and applications, and the idea of making this available to the community as open-source. After some short consideration I decided to take up this opportunity, and without a doubt this has been one of the best decisions I’ve ever made in my job.

    \ No newline at end of file

    diff --git a/blog/page/17/index.html b/blog/page/17/index.html
    index 15cf6e1cae..c4de2594ad 100644
    --- a/blog/page/17/index.html
    +++ b/blog/page/17/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    When developing tests for your project, sooner or later you will probably run into a situation where some of the tests fail randomly. These tests, also known as flaky tests, are very unpleasant, as you never know whether a failure was random or whether there is a regression in your code. In the worst case you just ignore these tests because you know they are flaky. Most testing frameworks even have a dedicated annotation or other means to express that a test is flaky and that its failures should be ignored. The value of such a test is very questionable. The best thing you can do with such a test is, of course, to fix it so that it doesn’t fail randomly. That’s easy to say, but harder to do. The hardest part is usually making the test fail in your development environment so that you can debug it, understand why it fails, and find the root cause of the failure. In this blog post I’ll show a few techniques which may help you simulate random test failures on your local machine.
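
    As a small, generic illustration of that idea (not one of the post’s specific techniques), a suspect test can be hammered locally with JUnit 5’s @RepeatedTest while random sleeps shift thread interleavings around; the class and scenario below are made up purely for demonstration.

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.junit.jupiter.api.RepeatedTest;

    class CounterRaceTest {

        private final AtomicInteger counter = new AtomicInteger();

        // Run the suspect scenario many times in a row; a race that fails once in a
        // few hundred runs becomes much easier to reproduce on a developer machine.
        @RepeatedTest(500)
        void concurrentIncrementsAreNotLost() throws InterruptedException {
            Thread t1 = new Thread(this::incrementWithJitter);
            Thread t2 = new Thread(this::incrementWithJitter);
            t1.start();
            t2.start();
            t1.join();
            t2.join();
            assertEquals(2, counter.getAndSet(0));
        }

        private void incrementWithJitter() {
            try {
                // A random sleep shifts thread interleavings around, similar to the load
                // spikes on a busy CI machine that often trigger flaky failures.
                Thread.sleep(ThreadLocalRandom.current().nextInt(5));
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            counter.incrementAndGet();
        }
    }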

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!
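
    To give a flavour of what such a deployment involves, the sketch below registers a Debezium Oracle connector against the Kafka Connect REST API from Java. All hostnames, credentials, and the table list are illustrative, and the property names follow the Debezium 1.x Oracle connector documentation; check the documentation of your Debezium version (for example, 2.x replaces database.server.name with topic.prefix) before reusing anything like this.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RegisterOracleConnector {

        public static void main(String[] args) throws Exception {
            // Illustrative registration payload; adjust values to your environment.
            String payload = """
                {
                  "name": "oracle-inventory-connector",
                  "config": {
                    "connector.class": "io.debezium.connector.oracle.OracleConnector",
                    "database.hostname": "oracle",
                    "database.port": "1521",
                    "database.user": "c##dbzuser",
                    "database.password": "dbz",
                    "database.dbname": "ORCLCDB",
                    "database.pdb.name": "ORCLPDB1",
                    "database.server.name": "server1",
                    "table.include.list": "DEBEZIUM.CUSTOMERS",
                    "database.history.kafka.bootstrap.servers": "kafka:9092",
                    "database.history.kafka.topic": "schema-changes.inventory"
                  }
                }
                """;

            HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8083/connectors"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(payload))
                    .build();

            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }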

    \ No newline at end of file

    diff --git a/blog/page/18/index.html b/blog/page/18/index.html
    index e6733df71a..624b90c554 100644
    --- a/blog/page/18/index.html
    +++ b/blog/page/18/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

    Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. The installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step and explain why it is essential, should you use a container image deployment.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    \ No newline at end of file

    diff --git a/blog/page/19/index.html b/blog/page/19/index.html
    index 4be79b5b6d..ea0b16184c 100644
    --- a/blog/page/19/index.html
    +++ b/blog/page/19/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    As you probably noticed, we have started work on Debezium 2.0. One of the planned changes for the 2.0 release is to switch to Java 11 as a baseline. While some Java build providers still support Java 8, other Java 8 distributions have already reached their end of life/support. Users are moving to Java 11 anyway, as surveys like New Relic’s State of the Java Ecosystem Report indicate. But it is not only a matter of support: Java 11 comes with various performance improvements, useful tools like JDK Flight Recorder, which was open-sourced in Java 11, and more. So we felt it was about time to start thinking about using a more recent JDK as the baseline for Debezium, and the new major release is a natural milestone at which to do the switch.

    \ No newline at end of file

    diff --git a/blog/page/2/index.html b/blog/page/2/index.html
    index 8121496294..55e07d968b 100644
    --- a/blog/page/2/index.html
    +++ b/blog/page/2/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.
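
    As a rough sketch of the counting part only (not the actual setup described in the post), the toy consumer below tallies Debezium’s operation type ("c", "u", "d", "r") per topic. The bootstrap server, topic names, and the use of the JSON converter are assumptions for the example, and the printed tallies stand in for whatever dashboarding tool you feed.

    import java.time.Duration;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class TableActivityCounter {

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "activity-dashboard");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

            ObjectMapper mapper = new ObjectMapper();
            Map<String, Map<String, Long>> countsPerTopic = new HashMap<>();

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                // Hypothetical change-event topics of a connector with topic prefix "dbserver1".
                consumer.subscribe(List.of("dbserver1.inventory.customers", "dbserver1.inventory.orders"));
                while (true) {
                    for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                        if (record.value() == null) {
                            continue; // skip tombstones
                        }
                        JsonNode event = mapper.readTree(record.value());
                        // The operation type lives in the Debezium envelope; it is nested
                        // under "payload" when the JSON converter emits schemas.
                        JsonNode envelope = event.has("payload") ? event.get("payload") : event;
                        String op = envelope.path("op").asText("unknown");
                        countsPerTopic
                                .computeIfAbsent(record.topic(), t -> new HashMap<>())
                                .merge(op, 1L, Long::sum);
                    }
                    System.out.println(countsPerTopic); // feed these tallies into your dashboarding tool
                }
            }
        }
    }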

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    tl;dr

    We are considering moving Debezium to a Software Foundation to expand our community, become more open and transparent in our roadmap and decisions, and encourage multi-vendor participation and execution.

    It’s with immense joy and pleasure to announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    \ No newline at end of file

    diff --git a/blog/page/20/index.html b/blog/page/20/index.html
    index 1e888937ca..6844a5b07a 100644
    --- a/blog/page/20/index.html
    +++ b/blog/page/20/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    The engineering team at Shopify recently improved the Debezium MySQL connector so that it supports incremental snapshotting for databases without write access by the connector, which is required when pointing Debezium to read-only replicas. In addition, the Debezium MySQL connector now also allows schema changes during an incremental snapshot. This blog post explains the implementation details of those features.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    \ No newline at end of file

    diff --git a/blog/page/21/index.html b/blog/page/21/index.html
    index 9d08ef22bb..c8d73b33ed 100644
    --- a/blog/page/21/index.html
    +++ b/blog/page/21/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    Hi everyone, my name is Vojtěch Juránek and I recently joined the Debezium team.

    I have spent most of my professional IT career at Red Hat. I have a background in particle physics, but I did quite a lot of programming even before joining Red Hat, working on simulations of high-energy particle collisions and their data analysis. Science is open by default, and all the software I was using was open source as well. That is where my love for open source started.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    \ No newline at end of file

    diff --git a/blog/page/22/index.html b/blog/page/22/index.html
    index bd639d5f6b..32cf03783d 100644
    --- a/blog/page/22/index.html
    +++ b/blog/page/22/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    TL;DR: Debezium is NOT affected by the recently disclosed remote code execution vulnerability in log4j2 (CVE-2021-44228). The log4j-1.2.17.jar shipped in Debezium’s container images contains a class JMSAppender, which is subject to a MODERATE vulnerability (CVE-2021-4104). This appender is NOT used by default, i.e. access to log4j’s configuration is required in order to exploit this CVE. As a measure of caution, we have decided to remove the JMSAppender class from Debezium’s container images as of version 1.7.2.Final, released today.

    On Dec 10th, a remote code execution vulnerability in the widely used log4j2 library was published (CVE-2021-44228). Debezium, just like Apache Kafka and Kafka Connect, does not use log4j2 and therefore is NOT affected by this CVE.

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release, due out next week, this release focused heavily on bugfixes. Yet it still includes incremental snapshot support for MongoDB! Overall, no less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector, and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

    Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    \ No newline at end of file

    diff --git a/blog/page/23/index.html b/blog/page/23/index.html
    index 4864986a0e..1380d06664 100644
    --- a/blog/page/23/index.html
    +++ b/blog/page/23/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements but most notably is the new native MongoDB 4.0 change streams support!

    Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

    In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it does not require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
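
    For readers who have not used the feature yet, an incremental snapshot is typically triggered by writing a signal row into the connector’s signaling table, following the execute-snapshot signal format from the Debezium documentation. The JDBC sketch below is a minimal illustration with made-up connection details; the signaling table must match the connector’s signal.data.collection setting.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.util.UUID;

    public class TriggerIncrementalSnapshot {

        public static void main(String[] args) throws Exception {
            // Hypothetical connection details and table names, for illustration only.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                 PreparedStatement stmt = conn.prepareStatement(
                    "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {

                stmt.setString(1, UUID.randomUUID().toString());
                stmt.setString(2, "execute-snapshot");
                // Ask the connector to incrementally snapshot a single table.
                stmt.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
                stmt.executeUpdate();
            }
        }
    }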

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community, some exciting things happened over the last few months. For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    \ No newline at end of file

    diff --git a/blog/page/24/index.html b/blog/page/24/index.html
    index 401e171dfd..c513fce929 100644
    --- a/blog/page/24/index.html
    +++ b/blog/page/24/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we are moving ahead towards the final release we include mostly bugfixes. Yet this release contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    At ScyllaDB, we develop a high-performance NoSQL database Scylla, API-compatible with Apache Cassandra, Amazon DynamoDB and Redis. Earlier this year, we introduced support for Change Data Capture in Scylla 4.3. This new feature seemed like a perfect match for integration with the Apache Kafka ecosystem, so we developed the Scylla CDC Source Connector using the Debezium framework. In this blogpost we will cover the basic structure of Scylla’s CDC, reasons we chose the Debezium framework and design decisions we made.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. Not only does this simplify running Kafka from an operational perspective; the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) should also provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    \ No newline at end of file

    diff --git a/blog/page/25/index.html b/blog/page/25/index.html
    index 1ca44fd675..c226946abf 100644
    --- a/blog/page/25/index.html
    +++ b/blog/page/25/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements not only to the Debezium Oracle connector but to the other connectors as well.

    Welcome to the newest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    It’s been a long time since our last edition. But we are back again! In case you missed our last edition, you can check it out here.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    \ No newline at end of file

    diff --git a/blog/page/26/index.html b/blog/page/26/index.html
    index 13700d37b9..cc25ff055b 100644
    --- a/blog/page/26/index.html
    +++ b/blog/page/26/index.html
    @@ -1 +1 @@
    - Debezium Blog

    Debezium Blog

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, Oracle BLOB/CLOB support is now opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, Oracle BLOB/CLOB support is now opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    \ No newline at end of file diff --git a/blog/page/27/index.html b/blog/page/27/index.html index 525a3c85c7..804a49c8e4 100644 --- a/blog/page/27/index.html +++ b/blog/page/27/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics, so as to make sure the same key is used on both sides of the join.

Thanks to KIP-213, this isn’t needed any longer, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically, in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.
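To make this more concrete, the following Kafka Streams sketch joins a Debezium-sourced addresses topic with a customers topic via the foreign-key join; the topic names and value types are illustrative assumptions, and serdes for keys and values are assumed to be configured as application defaults:

```java
import java.util.Properties;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KTable;

public class CustomerAddressFkJoin {

    // Minimal value types for illustration; in a real pipeline these would be
    // mapped from the Debezium change event payloads.
    record Customer(long id, String name) {}
    record Address(long id, long customerId, String street) {}
    record AddressWithCustomer(Address address, Customer customer) {}

    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // Topic names are assumptions; suitable default serdes for the key and
        // value types are assumed to be configured for the application.
        KTable<Long, Customer> customers = builder.table("dbserver1.inventory.customers");
        KTable<Long, Address> addresses = builder.table("dbserver1.inventory.addresses");

        // KIP-213 foreign-key join: the second argument extracts the customer id
        // from each address value; Kafka Streams handles the re-keying internally.
        KTable<Long, AddressWithCustomer> enriched = addresses.join(
                customers,
                Address::customerId,
                AddressWithCustomer::new);

        enriched.toStream().to("addresses-with-customer");

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "fk-join-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        new KafkaStreams(builder.build(), props).start();
    }
}
```

The joined table could then be grouped and aggregated per customer to build the nested document that is pushed to a sink such as Elasticsearch.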

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics, so as to make sure the same key is used on both sides of the join.

Thanks to KIP-213, this isn’t needed any longer, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically, in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    \ No newline at end of file diff --git a/blog/page/28/index.html b/blog/page/28/index.html index 5eb94161f5..637038f8c6 100644 --- a/blog/page/28/index.html +++ b/blog/page/28/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    A Happy New Year to the Debezium Community!

May all your endeavours be successful, your data be consistent, and most importantly, everyone stay safe and healthy. With 2020 in the books, I thought it’d be nice to take a look back and do a quick recap of what has happened around Debezium over the last year.

    First, some facts and numbers for you stats lovers out there:

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    A Happy New Year to the Debezium Community!

May all your endeavours be successful, your data be consistent, and most importantly, everyone stay safe and healthy. With 2020 in the books, I thought it’d be nice to take a look back and do a quick recap of what has happened around Debezium over the last year.

    First, some facts and numbers for you stats lovers out there:

    \ No newline at end of file diff --git a/blog/page/29/index.html b/blog/page/29/index.html index d6b3fbbe03..21f96bcbb3 100644 --- a/blog/page/29/index.html +++ b/blog/page/29/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

The current trend in application development gravitates toward microservices and microservice architectures. While this approach gives developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place, so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

The current trend in application development gravitates toward microservices and microservice architectures. While this approach gives developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place, so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    \ No newline at end of file diff --git a/blog/page/3/index.html b/blog/page/3/index.html index 89465c1e6b..f5785ec093 100644 --- a/blog/page/3/index.html +++ b/blog/page/3/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    \ No newline at end of file diff --git a/blog/page/30/index.html b/blog/page/30/index.html index eedd7ca79b..88569bcd39 100644 --- a/blog/page/30/index.html +++ b/blog/page/30/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

I started my journey with Red Hat in April 2020, after completing my graduation. I was introduced to open source in my early college days, but I wasn’t aware of how organizations work and wanted to get the essence of open source ethics and values. That is something I have been fascinated to learn since joining Red Hat.

My work started under the Data Virtualization team with Teiid and then under the GRAPHQLCRUD project, which defines a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

Coming to Debezium, I first heard about it when some DV members started contributing there; back then it was a completely new thing for me. I started exploring more, and it was not long before I had my first interaction with Gunnar and Jiri. With a warm welcome and a great team here, I am really excited to work with the Debezium Community.

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing users to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

I started my journey with Red Hat in April 2020, after completing my graduation. I was introduced to open source in my early college days, but I wasn’t aware of how organizations work and wanted to get the essence of open source ethics and values. That is something I have been fascinated to learn since joining Red Hat.

My work started under the Data Virtualization team with Teiid and then under the GRAPHQLCRUD project, which defines a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

Coming to Debezium, I first heard about it when some DV members started contributing there; back then it was a completely new thing for me. I started exploring more, and it was not long before I had my first interaction with Gunnar and Jiri. With a warm welcome and a great team here, I am really excited to work with the Debezium Community.

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing users to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    \ No newline at end of file diff --git a/blog/page/31/index.html b/blog/page/31/index.html index 02d82d17b4..e2c51f747a 100644 --- a/blog/page/31/index.html +++ b/blog/page/31/index.html @@ -7,4 +7,4 @@ num.partitions = 1 compression.type = producer log.cleanup.policy = delete -log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, as KIP-158 has been implemented to enable customizable topic creation with Kafka Connect.
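As a rough sketch of what this looks like, the topic.creation.* rules simply become part of the connector configuration registered with Kafka Connect; the connector class, server name, and all numeric values below are illustrative assumptions, not recommendations:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class TopicCreationConfigSketch {
    public static void main(String[] args) {
        // Illustrative connector configuration; host and server names and all
        // numeric values are placeholders.
        Map<String, String> config = new LinkedHashMap<>();
        config.put("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        config.put("database.hostname", "mysql");
        config.put("database.server.name", "dbserver1");

        // KIP-158: with Kafka Connect 2.6.0+ the worker creates missing topics
        // for the connector according to these per-connector rules, instead of
        // relying on broker-side topic auto-creation.
        config.put("topic.creation.default.replication.factor", "3");
        config.put("topic.creation.default.partitions", "10");
        config.put("topic.creation.default.cleanup.policy", "delete");
        config.put("topic.creation.default.retention.ms", "604800000"); // 7 days

        // This map would be registered via the Kafka Connect REST API.
        config.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```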


    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    \ No newline at end of file +log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, as KIP-158 has been implemented to enable customizable topic creation with Kafka Connect.


    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    \ No newline at end of file diff --git a/blog/page/32/index.html b/blog/page/32/index.html index b7cff0af26..a907acf822 100644 --- a/blog/page/32/index.html +++ b/blog/page/32/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 for capturing clickstreams in the offshore datacenters into Kafka and aggregating them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

In 2014 I made my first OSS contributions to Composer, PHP’s dependency manager, and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I made my first contributions to Debezium, working on the MySQL snapshot process and fixing a MySQL TIME data type issue.

    In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurances, industrial sector, media). I was doing lots of networking at that time, where I learned how awesome the community around Kafka is. I was always quite sad I didn’t have more time to focus on OSS projects.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 for capturing clickstreams in the offshore datacenters into Kafka and aggregating them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

In 2014 I made my first OSS contributions to Composer, PHP’s dependency manager, and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I made my first contributions to Debezium, working on the MySQL snapshot process and fixing a MySQL TIME data type issue.

    In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurances, industrial sector, media). I was doing lots of networking at that time, where I learned how awesome the community around Kafka is. I was always quite sad I didn’t have more time to focus on OSS projects.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    \ No newline at end of file diff --git a/blog/page/33/index.html b/blog/page/33/index.html index 0df428de64..0cbf7f7242 100644 --- a/blog/page/33/index.html +++ b/blog/page/33/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

Overall, the community fixed no fewer than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

Overall, the community fixed no fewer than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    \ No newline at end of file diff --git a/blog/page/34/index.html b/blog/page/34/index.html index 90bd789071..6c4995ea63 100644 --- a/blog/page/34/index.html +++ b/blog/page/34/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.
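For a flavor of what consuming change events through that new API module can look like, here is a minimal sketch of the engine usage; the connector choice, connection details, and file paths are placeholders, and shutdown handling is omitted:

```java
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class EmbeddedEngineSketch {
    public static void main(String[] args) {
        // All connection details are assumptions for illustration only.
        Properties props = new Properties();
        props.setProperty("name", "engine");
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("database.server.name", "dbserver1");

        // The engine hands each change event to the consumer as a JSON string.
        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value()))
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);
    }
}
```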

    We have developed a Debezium connector for usage with Db2 which is now available as part of the Debezium incubator. Here we describe the use case we have for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecology, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power many different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data to a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that future users can avoid them, and discuss one of the more challenging production incidents we faced and how Debezium helped ensure we could recover without any data loss. In closing, we discuss the value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    We have developed a Debezium connector for usage with Db2 which is now available as part of the Debezium incubator. Here we describe the use case we have for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecology, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power many different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data to a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that future users can avoid them, and discuss one of the more challenging production incidents we faced and how Debezium helped ensure we could recover without any data loss. In closing, we discuss the value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    \ No newline at end of file diff --git a/blog/page/35/index.html b/blog/page/35/index.html index 7099b0d1df..370f6bc7d0 100644 --- a/blog/page/35/index.html +++ b/blog/page/35/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

• Use the Debezium embedded engine in a standalone Java application and write the integration code in plain Java; that’s often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub, etc.

    • Use an existing integration framework or service bus to express the pipeline logic

    This article is focusing on the third option - a dedicated integration framework.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
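A rough sketch of such a Testcontainers-based CDC test setup is shown below; it assumes the debezium-testing-testcontainers module together with the Testcontainers Kafka and PostgreSQL modules, and the exact class and method names (DebeziumContainer, ConnectorConfiguration, and so on) as well as image tags may differ between versions:

```java
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;

import io.debezium.testing.testcontainers.ConnectorConfiguration;
import io.debezium.testing.testcontainers.DebeziumContainer;

public class CdcIntegrationTestSketch {

    private static final Network network = Network.newNetwork();

    private static final KafkaContainer kafka =
            new KafkaContainer().withNetwork(network);

    private static final PostgreSQLContainer<?> postgres =
            new PostgreSQLContainer<>("debezium/postgres:11")
                    .withNetwork(network)
                    .withNetworkAliases("postgres");

    private static final DebeziumContainer connect =
            new DebeziumContainer("debezium/connect:1.1.0.Final")
                    .withNetwork(network)
                    .withKafka(kafka)
                    .dependsOn(kafka);

    public static void main(String[] args) {
        kafka.start();
        postgres.start();
        connect.start();

        // Register a Postgres connector against the database container;
        // the property names mirror the regular connector configuration.
        ConnectorConfiguration connector = ConnectorConfiguration
                .forJdbcContainer(postgres)
                .with("database.server.name", "dbserver1");

        connect.registerConnector("inventory-connector", connector);

        // ...consume from Kafka and assert on the expected change events...
    }
}
```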

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to perform "read your own writes" semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
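As a hedged illustration of how that transformation is typically wired up (the table name, routing column, and property names below are assumptions following current documentation and may differ per version; the documentation linked above remains the authoritative reference), the SMT is enabled in the connector configuration roughly like this:

```java
import java.util.Map;

public class OutboxEventRouterConfigSketch {
    public static void main(String[] args) {
        // Illustrative connector settings enabling the outbox event router SMT.
        Map<String, String> config = Map.of(
                "connector.class", "io.debezium.connector.postgresql.PostgresConnector",
                "table.include.list", "public.outbox",
                "transforms", "outbox",
                "transforms.outbox.type", "io.debezium.transforms.outbox.EventRouter",
                // Route each event to a topic derived from the aggregate type column.
                "transforms.outbox.route.by.field", "aggregatetype");

        config.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```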

    \ No newline at end of file + Debezium Blog

    Debezium Blog

One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

• Use the Debezium embedded engine in a standalone Java application and write the integration code in plain Java; that’s often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub, etc.

    • Use an existing integration framework or service bus to express the pipeline logic

    This article is focusing on the third option - a dedicated integration framework.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to retain "read your own writes" semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.
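
As a rough sketch of what that looks like in practice, assuming a hypothetical outbox table with id, aggregatetype, aggregateid, type and payload columns, a service writes its business data and the corresponding event in one and the same transaction:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.util.UUID;
    import javax.sql.DataSource;

    public class OrderService {
        private final DataSource dataSource;

        public OrderService(DataSource dataSource) {
            this.dataSource = dataSource;
        }

        public void placeOrder(long orderId, String orderJson) throws Exception {
            try (Connection conn = dataSource.getConnection()) {
                conn.setAutoCommit(false);

                // 1. The regular business write ("read your own writes" works as usual)
                try (PreparedStatement ps = conn.prepareStatement(
                        "INSERT INTO purchase_order (id, payload) VALUES (?, ?)")) {
                    ps.setLong(1, orderId);
                    ps.setString(2, orderJson);
                    ps.executeUpdate();
                }

                // 2. The outbox event, inserted in the very same transaction;
                //    Debezium captures this table and relays the event to other services
                try (PreparedStatement ps = conn.prepareStatement(
                        "INSERT INTO outbox (id, aggregatetype, aggregateid, type, payload) VALUES (?, ?, ?, ?, ?)")) {
                    ps.setString(1, UUID.randomUUID().toString());
                    ps.setString(2, "Order");
                    ps.setString(3, String.valueOf(orderId));
                    ps.setString(4, "OrderCreated");
                    ps.setString(5, orderJson);
                    ps.executeUpdate();
                }

                conn.commit();
            }
        }
    }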

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
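
For orientation, wiring that transformation into a connector comes down to a couple of configuration properties; a minimal, hedged excerpt shown as Java Properties (the table name is a placeholder, and the full option set is described in the documentation):

    import java.util.Properties;

    public class OutboxRouterConfigSketch {
        public static Properties outboxRouting() {
            Properties props = new Properties();
            // Capture only the outbox table itself
            props.setProperty("table.whitelist", "public.outbox");
            // Route its change events with Debezium's outbox event router SMT
            props.setProperty("transforms", "outbox");
            props.setProperty("transforms.outbox.type", "io.debezium.transforms.outbox.EventRouter");
            return props;
        }
    }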

diff --git a/blog/page/36/index.html b/blog/page/36/index.html

    Debezium Blog

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

When a Debezium connector is deployed to a Kafka Connect instance, it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

Let’s recall what a connector registration request looks like for the Debezium MySQL connector:
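
In plain Java terms, such a registration request is just a JSON document POSTed to the Kafka Connect REST API; the host names, credentials and topic names below are placeholders:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RegisterConnectorSketch {
        public static void main(String[] args) throws Exception {
            String config = """
                {
                  "name": "inventory-connector",
                  "config": {
                    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
                    "database.hostname": "mysql",
                    "database.port": "3306",
                    "database.user": "debezium",
                    "database.password": "dbz",
                    "database.server.id": "184054",
                    "database.server.name": "dbserver1",
                    "database.whitelist": "inventory",
                    "database.history.kafka.bootstrap.servers": "kafka:9092",
                    "database.history.kafka.topic": "schema-changes.inventory"
                  }
                }
                """;

            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://connect:8083/connectors"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(config))
                    .build();

            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }

Note the database.password sitting in plain text in the request body - exactly the kind of value one would want to keep hidden from other users of the Connect API.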

Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

• Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)


diff --git a/blog/page/37/index.html b/blog/page/37/index.html

    Debezium Blog

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

• Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

As a follow-up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features that make it possible to capture and fix any missing transactional data.

In the above-mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as:

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly on the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Welcome to the Debezium community newsletter in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in the Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.
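
One of those strategies follows directly from the sentence above: make the unchanged values part of the table’s replica identity, for instance by switching the table to REPLICA IDENTITY FULL. A minimal, hedged JDBC sketch (connection details and table name are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ReplicaIdentitySketch {
        public static void main(String[] args) throws Exception {
            // Placeholder connection details
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                 Statement stmt = conn.createStatement()) {
                // With REPLICA IDENTITY FULL, the previous value of every column -
                // including TOASTed ones - is written to the WAL, so Debezium can
                // include them in change events (at the cost of larger WAL entries).
                stmt.execute("ALTER TABLE inventory.products REPLICA IDENTITY FULL");
            }
        }
    }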


diff --git a/blog/page/38/index.html b/blog/page/38/index.html

    Debezium Blog

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

This release finalizes the work of eight preview releases overall. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.
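
As a rough preview of the stream processing part, here is a heavily simplified Kafka Streams sketch that joins change events with a topic of transaction-scoped metadata; the topic names are assumptions, both inputs are presumed to already be keyed by transaction id, and the buffering and ordering concerns the post deals with are omitted:

    import java.util.Properties;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;

    public class AuditLogEnricherSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "log-enricher");
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

            StreamsBuilder builder = new StreamsBuilder();

            // Transaction metadata (why, when, by whom), keyed by transaction id
            KTable<String, String> txContext = builder.table("dbserver1.inventory.transaction_context_data");

            // Data change events, assumed to be re-keyed by transaction id upstream
            KStream<String, String> changes = builder.stream("dbserver1.inventory.vegetable.by_tx");

            // Attach the metadata to each change event and emit the enriched record
            changes.join(txContext, (change, context) -> "{\"change\":" + change + ",\"audit\":" + context + "}")
                   .to("dbserver1.inventory.vegetable.enriched");

            new KafkaStreams(builder.build(), props).start();
        }
    }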

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.


diff --git a/blog/page/39/index.html b/blog/page/39/index.html

    Debezium Blog

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts to rebase the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance effort for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of deploying the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
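
In terms of connector configuration, opting into the new decoder is a single property; a minimal, hedged excerpt (all other required connection properties are omitted):

    import java.util.Properties;

    public class PgOutputConfigSketch {
        public static Properties pgOutputExcerpt() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            // Use the pgoutput plug-in that ships with PostgreSQL 10+, so no
            // server-side logical decoding plug-in has to be installed
            props.setProperty("plugin.name", "pgoutput");
            return props;
        }
    }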

    This post originally appeared on the WePay Engineering blog.

    In the first half of this blog post series, we explained our decision-making process of designing a streaming data pipeline for Cassandra at WePay. In this post, we will break down the pipeline into three sections and discuss each of them in more detail:

    1. Cassandra to Kafka with CDC agent

2. Kafka to BigQuery with KCBQ

    3. Transformation with BigQuery view

    This post originally appeared on the WePay Engineering blog.

    Historically, MySQL had been the de-facto database of choice for microservices at WePay. As WePay scales, the sheer volume of data written into some of our microservice databases demanded us to make a scaling decision between sharded MySQL (i.e. Vitess) and switching to a natively sharded NoSQL database. After a series of evaluations, we picked Cassandra, a NoSQL database, primarily because of its high availability, horizontal scalability, and ability to handle high write throughput.

    Debezium has received a huge improvement to the structure of its container images recently, making it extremely simple to extend its behaviour.

    This is a small tutorial showing how you can for instance add Sentry, "an open-source error tracking [software] that helps developers monitor and fix crashes in real time". Here we’ll use it to collect and report any exceptions from Kafka Connect and its connectors. Note that this is only applicable for Debezium 0.9+.

We need a few things to get Sentry working; we’ll add all of them and then have a Dockerfile which glues it all together correctly:

    • Configure Log4j

    • SSL certificate for sentry.io, since it’s not by default in the JVM trusted chain

    • The sentry and sentry-log4j libraries


diff --git a/blog/page/4/index.html b/blog/page/4/index.html

    Debezium Blog

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

Debezium has provided a way to run connectors directly embedded in an application from the very beginning of the project. The way this is provided has changed over time and still evolves. This article describes another evolution step in this regard - a new implementation of the Debezium engine.


diff --git a/blog/page/40/index.html b/blog/page/40/index.html

    Debezium Blog

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

This is true for all connectors except the Debezium PostgreSQL connector. This connector is specific in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins:

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

• wal2json, which is based on JSON and which is maintained by its own upstream community

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one of the snapshot phase, so that downstream consumers can react to this.


diff --git a/blog/page/41/index.html b/blog/page/41/index.html

    Debezium Blog

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    This is a guest post by Apache Pulsar PMC Member and Committer Jia Zhai.

Debezium is an open source project for change data capture (CDC). It is built on Apache Kafka Connect and supports multiple databases, such as MySQL, MongoDB, PostgreSQL, Oracle, and SQL Server. Apache Pulsar includes a set of built-in connectors based on the Pulsar IO framework, which is the counterpart to Apache Kafka Connect.

    As of version 2.3.0, Pulsar IO comes with support for the Debezium source connectors out of the box, so you can leverage Debezium to stream changes from your databases into Apache Pulsar. This tutorial walks you through setting up the Debezium connector for MySQL with Pulsar IO.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    Hello everyone, my name is Chris Cranford and I recently joined the Debezium team.

My journey at Red Hat began just over three years ago; however, I have been in this line of work for nearly twenty years. All throughout my career, I have advocated and supported open source software. Many of my initial software endeavors were based on open source software, several of which are still heavily used today, such as Hibernate ORM.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.


diff --git a/blog/page/42/index.html b/blog/page/42/index.html

    Debezium Blog

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from best-of-breed Java libraries and standards, it allows building Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.
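
As a rough illustration of what the consuming side can look like in Quarkus, here is a minimal sketch using MicroProfile Reactive Messaging; the channel name, payload type and class names are assumptions rather than the exact code from that post:

    import javax.enterprise.context.ApplicationScoped;
    import org.eclipse.microprofile.reactive.messaging.Incoming;

    @ApplicationScoped
    public class OrderEventConsumer {

        // Bound to a Kafka topic carrying Debezium change events via the
        // mp.messaging.incoming.orders.* entries in application.properties
        @Incoming("orders")
        public void onOrderEvent(String event) {
            // react to the change event, e.g. create a shipment record
            System.out.println("Received change event: " + event);
        }
    }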

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

The recording of that 30 min session is available on YouTube now. It also contains a demo that shows how to set up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins at 12 min 40 into the recording.

    Enjoy!


diff --git a/blog/page/43/index.html b/blog/page/43/index.html

    Debezium Blog

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget about calling that invalidation functionality, or the application will keep working with outdated cached data.

In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This allows invalidating affected cache entries in near-realtime, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.
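
To make that reaction concrete, here is a minimal, hedged sketch of the eviction step using the standard JPA cache API; how the change events are received (embedded engine, Kafka consumer) and how the entity type and id are derived from them is left out:

    import javax.persistence.EntityManagerFactory;

    public class CacheInvalidator {
        private final EntityManagerFactory emf;

        public CacheInvalidator(EntityManagerFactory emf) {
            this.emf = emf;
        }

        // Called for every Debezium change event captured for a cached entity's table
        public void onEntityChanged(Class<?> entityType, Object id) {
            // Drop the possibly stale entry from the second-level cache;
            // Hibernate will re-load the entity from the database on next access
            emf.getCache().evict(entityType, id);
        }
    }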

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget to call that invalidation functionality, or the application will keep working with outdated cached data.

In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This makes it possible to invalidate affected cache entries in near real-time, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.
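
To make the invalidation step concrete, here is a minimal sketch, assuming a hypothetical handler that receives the affected table name and primary key extracted from a Debezium change event; the eviction itself only needs the standard JPA Cache API:

    import javax.persistence.Cache;
    import javax.persistence.EntityManagerFactory;
    import javax.persistence.Persistence;

    public class CacheInvalidator {

        // hypothetical persistence unit name
        private final EntityManagerFactory emf = Persistence.createEntityManagerFactory("orders-pu");

        // hypothetical cached entity
        public static class PurchaseOrder {
        }

        /**
         * Invoked for every change event captured by Debezium; the table name and
         * primary key are assumed to be extracted from the event beforehand.
         */
        public void onChangeEvent(String table, Object primaryKey) {
            Cache secondLevelCache = emf.getCache();
            if ("purchaseorder".equals(table)) {
                // evict the stale entry; Hibernate ORM reloads the entity on next access
                secondLevelCache.evict(PurchaseOrder.class, primaryKey);
            }
        }
    }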

diff --git a/blog/page/44/index.html b/blog/page/44/index.html
index fdb76ec251..0e48697a10 100644

    Debezium Blog

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from MongoDB change data messages, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

Updating external full-text search indexes (e.g. Elasticsearch) after data changes is a very popular use case for change data capture (CDC).

As we discussed in a blog post a while ago, the combination of Debezium’s CDC source connectors and Confluent’s sink connector for Elasticsearch makes it straightforward to capture data changes in MySQL, Postgres etc. and push them towards Elasticsearch in near real-time. This results in a 1:1 relationship between tables in the source database and a corresponding search index in Elasticsearch, which is perfectly fine for many use cases.

    It gets more challenging though if you’d like to put entire aggregates into a single index. An example could be a customer and all their addresses; those would typically be stored in two separate tables in an RDBMS, linked by a foreign key, whereas you’d like to have just one index in Elasticsearch, containing documents of customers with their addresses embedded, allowing you to efficiently search for customers based on their address.

Following up on the KStreams-based solution we described recently, in this post we’d like to present an alternative for materializing such aggregate views, driven by the application layer.

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

Most of the time, Debezium is used to stream data changes into Apache Kafka. But what if you’re using another streaming platform such as Apache Pulsar or a cloud-based solution such as Amazon Kinesis, Azure Event Hubs and the like? Can you still benefit from Debezium’s powerful change data capture (CDC) capabilities and ingest changes from databases such as MySQL, Postgres, SQL Server etc.?

    Turns out, with just a bit of glue code, you can! In the following we’ll discuss how to use Debezium to capture changes in a MySQL database and stream the change events into Kinesis, a fully-managed data streaming service available on the Amazon cloud.
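
To give a flavour of that glue code, the following is a small, hypothetical sketch that runs Debezium’s embedded engine (shown with the current io.debezium.engine API rather than the 0.8-era one) and forwards each change event to Kinesis via the AWS SDK; stream naming, serialization and error handling are simplified assumptions:

    import com.amazonaws.services.kinesis.AmazonKinesis;
    import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
    import com.amazonaws.services.kinesis.model.PutRecordRequest;
    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Properties;
    import java.util.concurrent.Executors;

    public class ChangeDataToKinesis {

        public static void main(String[] args) {
            AmazonKinesis kinesis = AmazonKinesisClientBuilder.defaultClient();

            Properties props = new Properties();
            // MySQL connector configuration (connector class, host, offset storage, ...) goes here

            DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(event -> {
                        // one Kinesis record per change event; the destination topic name
                        // is reused as the stream name purely for illustration
                        PutRecordRequest request = new PutRecordRequest()
                                .withStreamName(event.destination())
                                .withPartitionKey(String.valueOf(event.key()))
                                .withData(ByteBuffer.wrap(event.value().getBytes(StandardCharsets.UTF_8)));
                        kinesis.putRecord(request);
                    })
                    .build();

            Executors.newSingleThreadExecutor().execute(engine);
        }
    }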

diff --git a/blog/page/45/index.html b/blog/page/45/index.html
index e42196235f..861fa5cebc 100644

    Debezium Blog

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). The MongoDB connector also saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. Update: the Docker images are now uploaded and ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

Yesterday I had the opportunity to present Debezium and the idea of change data capture (CDC) to the Darmstadt Java User Group. It was a great evening with lots of interesting discussions and questions. One of the questions was the following: what is the advantage of using a log-based change data capturing tool such as Debezium over simply polling for updated records?

So first of all, what’s the difference between the two approaches? With polling-based (or query-based) CDC you repeatedly run queries (e.g. via JDBC) to retrieve any newly inserted or updated rows from the tables to be captured. Log-based CDC, in contrast, works by reacting to any changes to the database’s log files (e.g. MySQL’s binlog or MongoDB’s oplog).
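
For contrast, a polling-based approach typically boils down to a loop like the following JDBC sketch (the table, columns and connection details are made up for illustration); note how deletes, as well as intermediate updates between two polls, are simply invisible to it:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Timestamp;

    public class PollingBasedCdc {

        public static void main(String[] args) throws Exception {
            // hypothetical source table "customers" with a "last_updated" column
            Timestamp lastSeen = new Timestamp(0L);

            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/inventory", "user", "password")) {
                while (true) {
                    try (PreparedStatement stmt = conn.prepareStatement(
                            "SELECT id, last_updated FROM customers WHERE last_updated > ?")) {
                        stmt.setTimestamp(1, lastSeen);
                        try (ResultSet rs = stmt.executeQuery()) {
                            while (rs.next()) {
                                lastSeen = rs.getTimestamp("last_updated");
                                // treat the row as a "change"; deleted rows never show up here
                                System.out.println("changed row, id=" + rs.getLong("id"));
                            }
                        }
                    }
                    Thread.sleep(5_000L); // poll interval: a trade-off between latency and load
                }
            }
        }
    }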

    As this wasn’t the first time this question came up, I thought I could provide a more extensive answer also here on the blog. That way I’ll be able to refer to this post in the future, should the question come up again :)

    So without further ado, here’s my list of five advantages of log-based CDC over polling-based approaches.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

diff --git a/blog/page/46/index.html b/blog/page/46/index.html
index d415966698..22198da93b 100644

    Debezium Blog

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

Last year we saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

    As a source of data we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain driven aggregates.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646, which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine.

    A user of the Debezium connector for MySQL informed us about a potential issue with the configuration of the connector’s internal database history topic, which may cause the deletion of parts of that topic (DBZ-663). Please continue reading if you’re using the Debezium MySQL connector in versions 0.7.3 or 0.7.4.

Microservice-based architectures can be considered an industry trend and are thus often found in enterprise applications lately. One possible way to keep data synchronized across multiple services and their backing data stores is to make use of an approach called change data capture, or CDC for short.

Essentially, CDC allows you to listen to any modifications occurring at one end of a data flow (i.e. the data source) and communicate them as change events to other interested parties or store them in a data sink. Instead of doing this in a point-to-point fashion, it’s advisable to decouple this flow of events between data sources and data sinks. Such a scenario can be implemented based on Debezium and Apache Kafka with relative ease and effectively no coding.

    As an example, consider the following microservice-based architecture of an order management system:

diff --git a/blog/page/47/index.html b/blog/page/47/index.html
index 2390f812dc..41e69bbfc2 100644

    Debezium Blog

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

    We wish all the best to the Debezium community for 2018!

While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server, to leverage its excellent capabilities for full-text search on our data. But to make matters a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we can optimize access to the data via the SQL query language as well as via full-text search.

Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues which were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to an improvement that reduces the risk of an internal race condition.

Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

diff --git a/blog/page/48/index.html b/blog/page/48/index.html
index b2b8c8689e..c8884c9f0b 100644

    Debezium Blog

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value loss. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Debezium’s project lead Gunnar Morling gave a few talks during recent Devoxx Belgium 2017. One of his talks was dedicated to Debezium and change data capture in general.

    If you are interested in those topics and you want to obtain a fast and simple introduction to it, do not hesitate and watch the talk. Batteries and demo included!

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.
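
As a sketch of where that flattening SMT fits, a JDBC sink connector configuration could register it along these lines; io.debezium.transforms.UnwrapFromEnvelope is the flattening transform shipped with Debezium 0.6, while the topic and connection values are purely illustrative:

    import java.util.Properties;

    public class JdbcSinkWithUnwrapSketch {

        public static void main(String[] args) {
            Properties sink = new Properties();
            sink.setProperty("connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector");
            sink.setProperty("topics", "dbserver1.inventory.customers");                     // hypothetical topic
            sink.setProperty("connection.url", "jdbc:postgresql://postgres:5432/inventory"); // hypothetical target

            // flatten Debezium's change-event envelope into a plain row the JDBC sink can write
            sink.setProperty("transforms", "unwrap");
            sink.setProperty("transforms.unwrap.type", "io.debezium.transforms.UnwrapFromEnvelope");

            sink.forEach((name, value) -> System.out.println(name + "=" + value));
        }
    }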

diff --git a/blog/page/49/index.html b/blog/page/49/index.html
index a2dd85ea97..11a2dd762e 100644

    Debezium Blog

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    When I first learned about the Debezium project last year, I was very excited about it right away.

I could see how this project would be very useful for many people out there and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, the overall idea of creating a diverse ecosystem of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming web site and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve previously been working on and also tie in with ideas I’ve been pursuing around CQRS, event sourcing and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM - a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play when using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture can be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

    Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each were initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and wrote blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who’s recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and will be an excellent lead for the Debezium community.

diff --git a/blog/page/5/index.html b/blog/page/5/index.html
index 1b794a53ba..d3f8634622 100644

    Debezium Blog

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of changes, from improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues with the 2.6.1.Final release, including support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, Avro compatibility fixes, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these in more depth…

As summer temperatures continue to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, and Vitess, as well as the Kubernetes Operator, along with a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    \ No newline at end of file diff --git a/blog/page/50/index.html b/blog/page/50/index.html index c1febbac53..6accd5e5b5 100644 --- a/blog/page/50/index.html +++ b/blog/page/50/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and more. You can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
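For illustration, here is a minimal sketch of what such a custom SMT might look like, assuming a hypothetical transform that reroutes every record to a single configurable topic. Only the Kafka Connect Transformation API used here is real; the class name and the target.topic property are made up for this example, and a production transform would add validation and error handling.

```java
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

/**
 * Hypothetical SMT that reroutes every record to a single topic chosen via
 * configuration. The Transformation API is Kafka Connect's; the class name and
 * the "target.topic" property are illustrative only.
 */
public class RerouteToTopic<R extends ConnectRecord<R>> implements Transformation<R> {

    public static final String TOPIC_CONFIG = "target.topic";

    private static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define(TOPIC_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH,
                    "Topic to which all records are rerouted");

    private String targetTopic;

    @Override
    public void configure(Map<String, ?> props) {
        targetTopic = (String) props.get(TOPIC_CONFIG);
    }

    @Override
    public R apply(R record) {
        // Keep the key, value, and schemas unchanged; only the topic is rewritten
        return record.newRecord(targetTopic, record.kafkaPartition(),
                record.keySchema(), record.key(),
                record.valueSchema(), record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return CONFIG_DEF;
    }

    @Override
    public void close() {
    }
}
```

Once the compiled class is on the Kafka Connect classpath, the connector configuration would reference it through the standard transforms and transforms.&lt;name&gt;.type properties.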

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    This post originally appeared on the WePay Engineering blog.

Change data capture has been around for a while, but some recent developments in technology have given it new life. Notably, using Kafka as a backbone to stream your database data in real time has become increasingly common.

    If you’re wondering why you might want to stream database changes into Kafka, I highly suggest reading The Hardest Part About Microservices: Your Data. At WePay, we wanted to integrate our microservices and downstream datastores with each other, so every system could get access to the data that it needed. We use Kafka as our data integration layer, so we needed a way to get our database data into it.

Last year, Yelp’s engineering team published an excellent series of posts on their data pipeline. These included a discussion on how they stream MySQL data into Kafka. Their architecture involves a series of homegrown pieces of software to accomplish the task, notably schematizer and MySQL streamer. The write-up triggered a thoughtful post on Debezium’s blog about a proposed equivalent architecture using Kafka Connect, Debezium, and Confluent’s schema registry. This proposed architecture is what we’ve been implementing at WePay, and this post describes how we leverage Debezium and Kafka Connect to stream our MySQL databases into Kafka.

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and more. You can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    This post originally appeared on the WePay Engineering blog.

Change data capture has been around for a while, but some recent developments in technology have given it new life. Notably, using Kafka as a backbone to stream your database data in real time has become increasingly common.

    If you’re wondering why you might want to stream database changes into Kafka, I highly suggest reading The Hardest Part About Microservices: Your Data. At WePay, we wanted to integrate our microservices and downstream datastores with each other, so every system could get access to the data that it needed. We use Kafka as our data integration layer, so we needed a way to get our database data into it.

Last year, Yelp’s engineering team published an excellent series of posts on their data pipeline. These included a discussion on how they stream MySQL data into Kafka. Their architecture involves a series of homegrown pieces of software to accomplish the task, notably schematizer and MySQL streamer. The write-up triggered a thoughtful post on Debezium’s blog about a proposed equivalent architecture using Kafka Connect, Debezium, and Confluent’s schema registry. This proposed architecture is what we’ve been implementing at WePay, and this post describes how we leverage Debezium and Kafka Connect to stream our MySQL databases into Kafka.

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

    \ No newline at end of file diff --git a/blog/page/51/index.html b/blog/page/51/index.html index fb655284ac..32565659be 100644 --- a/blog/page/51/index.html +++ b/blog/page/51/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL connector and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector could stop without completing all updates in a transaction, and when the connector restarted it would start with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. Additionally, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is also possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is a very powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.
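As a rough illustration of the consumer side, the sketch below shows how a sink application might detect and parse such a field. The io.debezium.data.json schema name, the Kafka Connect Struct/Schema API, and Jackson’s ObjectMapper are real; the helper class and method are hypothetical.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Struct;

/**
 * Hypothetical consumer-side helper: if a field of a change event value carries
 * the io.debezium.data.json semantic type, parse its string payload with Jackson.
 */
public class JsonFieldReader {

    private static final String DEBEZIUM_JSON = "io.debezium.data.json";
    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static JsonNode readJsonField(Struct value, String fieldName) throws Exception {
        Field field = value.schema().field(fieldName);
        if (field != null && DEBEZIUM_JSON.equals(field.schema().name())) {
            // The connector serializes the MySQL JSON column as a plain string
            return MAPPER.readTree(value.getString(fieldName));
        }
        return null; // not present or not a Debezium JSON field
    }
}
```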

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial, and we’ve updated the tutorial itself to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL connector and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector could stop without completing all updates in a transaction, and when the connector restarted it would start with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. Additionally, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is also possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is a very powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial, and we’ve updated the tutorial itself to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    \ No newline at end of file diff --git a/blog/page/52/index.html b/blog/page/52/index.html index 35cb3fb564..beba77f529 100644 --- a/blog/page/52/index.html +++ b/blog/page/52/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial, and we’ve updated the tutorial itself to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

    Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.
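As a rough sketch of that setup effort, the converter-related settings for the two options might look something like the following, expressed here as Java maps purely for illustration rather than as a worker properties file; the schema registry URL is a placeholder and your deployment details will differ.

```java
import java.util.HashMap;
import java.util.Map;

public class ConverterConfigExample {

    // Illustrative Avro setup: the Confluent Avro converter plus the address of a schema registry
    public static Map<String, String> avroConverterSettings() {
        Map<String, String> props = new HashMap<>();
        props.put("key.converter", "io.confluent.connect.avro.AvroConverter");
        props.put("value.converter", "io.confluent.connect.avro.AvroConverter");
        props.put("key.converter.schema.registry.url", "http://schema-registry:8081");   // placeholder URL
        props.put("value.converter.schema.registry.url", "http://schema-registry:8081"); // placeholder URL
        return props;
    }

    // The JSON alternative ships with Kafka Connect and needs no extra services
    public static Map<String, String> jsonConverterSettings() {
        Map<String, String> props = new HashMap<>();
        props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        return props;
    }
}
```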

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

After a few weeks’ delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial, and we’ve updated the tutorial itself to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

    Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

After a few weeks’ delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    \ No newline at end of file diff --git a/blog/page/53/index.html b/blog/page/53/index.html index 81a4722900..ff417e34ac 100644 --- a/blog/page/53/index.html +++ b/blog/page/53/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

Change data capture is a hot topic. Debezium’s goal is to make change data capture easy for multiple DBMSes, but admittedly we’re still a young open source project and so far we’ve only released a connector for MySQL, with a connector for MongoDB just around the corner. So it’s great to see how others are using and implementing change data capture. In this post, we’ll review Yelp’s approach and see how it is strikingly similar to Debezium’s MySQL connector.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and it eliminates the possibility of a poorly-timed connector crash causing the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved: it can now monitor and produce change events for HA MySQL clusters using GTIDs, it performs a consistent snapshot when starting up for the first time, and it has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, which is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week, and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java, so they decided to implement this one themselves :)

Kubernetes is written in Go and is quickly becoming the de facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    Change data capture is a hot topic. Debezium’s goal is to make change data capture easy for multiple DBMSes, but admittedly we’re still a young open source project and so far we’ve only released a connector for MySQL with a connector for MongoDB that’s just around the corner. So it’s great to see how others are using and implementing change data capture. In this post, we’ll review Yelp’s approach and see how it is strikingly similar to Debezium’s MySQL connector.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

    I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that can produce change events with incorrect source metadata, and that eliminates the possibility a poorly-timed connector crash causing the connector to only process some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, which is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week, and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java, so they decided to implement this one themselves :)

Kubernetes is written in Go and is quickly becoming the de facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file diff --git a/blog/page/54/index.html b/blog/page/54/index.html index 3e5673bbce..e17a9da5a8 100644 --- a/blog/page/54/index.html +++ b/blog/page/54/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.

Parsing the DDL of MySQL or any other major relational database can seem a daunting task. Usually each DBMS has a highly customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.

    As you may have noticed, we have a new website with documentation, a blog, and information about the Debezium community and how you can contribute. Let us know what you think, and contribute improvements.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.
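As a minimal sketch of what embedding looks like, the example below uses the current DebeziumEngine API rather than the original 0.1-era EmbeddedEngine class, since the API has evolved since this post was written; the property values are placeholders and the connector-specific settings are abbreviated.

```java
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class EmbeddedEngineSketch {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "my-embedded-engine"); // placeholder engine name
        props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        // Without Kafka, the application stores offsets itself, e.g. in a local file
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        // ... database connection and other connector-specific settings go here ...

        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value())) // handle each change event
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine); // DebeziumEngine implements Runnable

        // On application shutdown: engine.close(); executor.shutdown();
    }
}
```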

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.

Parsing the DDL of MySQL or any other major relational database can seem a daunting task. Usually each DBMS has a highly customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.

    As you may have noticed, we have a new website with documentation, a blog, and information about the Debezium community and how you can contribute. Let us know what you think, and contribute improvements.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.

    \ No newline at end of file diff --git a/blog/page/6/index.html b/blog/page/6/index.html index 0c5c29c853..972b9cbed2 100644 --- a/blog/page/6/index.html +++ b/blog/page/6/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, the MongoDB connector silently ceasing to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung into action, with spring upon us and summer in sight, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    \ No newline at end of file + Debezium Blog

    Debezium Blog

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, the MongoDB connector silently ceasing to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung into action, with spring upon us and summer in sight, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    \ No newline at end of file diff --git a/blog/page/7/index.html b/blog/page/7/index.html index dc717088a7..5edcdf0de2 100644 --- a/blog/page/7/index.html +++ b/blog/page/7/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and see how to use them to improve your change data capture experience…

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database, which allows us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and see how to use them to improve your change data capture experience…

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database, which allows us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    \ No newline at end of file diff --git a/blog/page/8/index.html b/blog/page/8/index.html index 6bacebb490..6cd840c082 100644 --- a/blog/page/8/index.html +++ b/blog/page/8/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    \ No newline at end of file diff --git a/blog/page/9/index.html b/blog/page/9/index.html index 901b1f765c..e3ce7d43dd 100644 --- a/blog/page/9/index.html +++ b/blog/page/9/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.
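    For a rough idea of what sending a signal over JMX can look like, the Java sketch below invokes a signal operation on the connector’s signaling MBean. The JMX endpoint, the ObjectName pattern, and the operation signature are assumptions for illustration only; the signaling documentation describes the actual bean exposed by the JMX channel, and the same operation can be reached over HTTP once Jolokia is attached to the process.

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    // A minimal sketch of sending an ad-hoc snapshot signal through the JMX channel.
    // The endpoint, MBean name, and operation signature are assumptions; adjust them
    // to match your deployment and the signaling documentation.
    public class JmxSignalClient {

        public static void main(String[] args) throws Exception {
            JMXServiceURL url =
                    new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi"); // assumed JMX endpoint
            try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
                MBeanServerConnection mbs = connector.getMBeanServerConnection();
                // Assumed pattern: debezium.<connector>:type=management,context=signals,server=<topic.prefix>
                ObjectName signalBean = new ObjectName(
                        "debezium.postgres:type=management,context=signals,server=dbserver1");
                mbs.invoke(signalBean,
                        "signal",
                        new Object[] {
                                "ad-hoc-1",                                        // signal id
                                "execute-snapshot",                                // signal type
                                "{\"data-collections\": [\"inventory.orders\"]}"   // signal payload
                        },
                        new String[] {String.class.getName(), String.class.getName(), String.class.getName()});
            }
        }
    }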

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    \ No newline at end of file + Debezium Blog

    Debezium Blog

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    \ No newline at end of file diff --git a/community/code-of-conduct/index.html b/community/code-of-conduct/index.html index 273f9e2af3..d41419667f 100644 --- a/community/code-of-conduct/index.html +++ b/community/code-of-conduct/index.html @@ -1 +1 @@ - Debezium Code of Conduct

    Debezium Code of Conduct

    Our Pledge

    In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

    Our Standards

    Examples of behavior that contributes to creating a positive environment include:

    • Using welcoming and inclusive language

    • Being respectful of differing viewpoints and experiences

    • Gracefully accepting constructive criticism

    • Focusing on what is best for the community

    • Showing empathy towards other community members

    Examples of unacceptable behavior by participants include:

    • The use of sexualized language or imagery and unwelcome sexual attention or advances

    • Trolling, insulting/derogatory comments, and personal or political attacks

    • Public or private harassment

    • Publishing others' private information, such as a physical or electronic address, without explicit permission

    • Other conduct which could reasonably be considered inappropriate in a professional setting

    Our Responsibilities

    Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

    Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

    Scope

    This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

    Enforcement

    Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at debezium@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

    Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.

    Attribution

    This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://contributor-covenant.org/version/1/4.

    \ No newline at end of file + Debezium Code of Conduct

    Debezium Code of Conduct

    Our Pledge

    In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

    Our Standards

    Examples of behavior that contributes to creating a positive environment include:

    • Using welcoming and inclusive language

    • Being respectful of differing viewpoints and experiences

    • Gracefully accepting constructive criticism

    • Focusing on what is best for the community

    • Showing empathy towards other community members

    Examples of unacceptable behavior by participants include:

    • The use of sexualized language or imagery and unwelcome sexual attention or advances

    • Trolling, insulting/derogatory comments, and personal or political attacks

    • Public or private harassment

    • Publishing others' private information, such as a physical or electronic address, without explicit permission

    • Other conduct which could reasonably be considered inappropriate in a professional setting

    Our Responsibilities

    Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

    Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

    Scope

    This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

    Enforcement

    Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at debezium@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

    Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.

    Attribution

    This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://contributor-covenant.org/version/1/4.

    \ No newline at end of file diff --git a/community/contribute/index.html b/community/contribute/index.html index 010cfbe9f7..50c3797538 100644 --- a/community/contribute/index.html +++ b/community/contribute/index.html @@ -1 +1 @@ - Contribute to Debezium

    Contribute to Debezium

    We’re an active open source software community. We welcome and value contributions from everyone, and have adopted a code of conduct for our community.

    Follow us

    We frequently post to Twitter about important goings on in the community. We also write on our blog about features, plans, and related technology. Follow both to keep track of what’s going on with Debezium.

    Talk to us

    There are two ways you can talk with our community about development-related topics:

    Suggest improvements

    See a problem or think of something that we could do better? Check our issues to see if it’s already been mentioned or discussed. If it has, join the conversation to add your take. Otherwise, go ahead and create an issue with more detail than you think is necessary.

    Code

    All of our code is in the Debezium Organization on GitHub, and currently we have several repositories.

    1. Get your development environment set up with the latest code, following the instructions in CONTRIBUTE.md.

    2. Check our open issues, especially those with the help_wanted label.

    3. Look at previous pull requests to learn our coding style and conventions.

    4. Ask fellow developers for guidance if you’re not sure how to proceed, get stuck, or simply need confirmation that you’re headed in the right direction.

    5. Make your changes and create a pull request, following our guidelines.

    Website

    Our website is also a community effort, and we welcome suggestions, fixes, improvements, and even new blog posts related to Debezium. Everything for our website is also on GitHub. Follow the instructions in CONTRIBUTE.md to get your environment set up with the latest source and to make your changes.

    Governance

    Debezium is an open source project owned and sponsored by Red Hat. The Debezium name and logo are trademarks of Red Hat.

    The Debezium project is operated as a community-centric open source project. While Red Hat product management has a voice, it is akin to the same voice of any member of the community, whether they contribute code, bug reports, bug fixes or documentation.

    \ No newline at end of file + Contribute to Debezium

    Contribute to Debezium

    We’re an active open source software community. We welcome and value contributions from everyone, and have adopted a code of conduct for our community.

    Follow us

    We frequently post to Twitter about important goings on in the community. We also write on our blog about features, plans, and related technology. Follow both to keep track of what’s going on with Debezium.

    Talk to us

    There are two ways you can talk with our community about development-related topics:

    Suggest improvements

    See a problem or think of something that we could do better? Check our issues to see if it’s already been mentioned or discussed. If it has, join the conversation to add your take. Otherwise, go ahead and create an issue with more detail than you think is necessary.

    Code

    All of our code is in the Debezium Organization on GitHub, and currently we have several repositories.

    1. Get your development environment set up with the latest code, following the instructions in CONTRIBUTE.md.

    2. Check our open issues, especially those with the help_wanted label.

    3. Look at previous pull requests to learn our coding style and conventions.

    4. Ask fellow developers for guidance if you’re not sure how to proceed, get stuck, or simply need confirmation that you’re headed in the right direction.

    5. Make your changes and create a pull request, following our guidelines.

    Website

    Our website is also a community effort, and we welcome suggestions, fixes, improvements, and even new blog posts related to Debezium. Everything for our website is also on GitHub. Follow the instructions in CONTRIBUTE.md to get your environment set up with the latest source and to make your changes.

    Governance

    Debezium is an open source project owned and sponsored by Red Hat. The Debezium name and logo are trademarks of Red Hat.

    The Debezium project is operated as a community-centric open source project. While Red Hat product management has a voice, it is akin to the same voice of any member of the community, whether they contribute code, bug reports, bug fixes or documentation.

    \ No newline at end of file diff --git a/community/index.html b/community/index.html index 73bcee1db7..a1229c5acb 100644 --- a/community/index.html +++ b/community/index.html @@ -1 +1 @@ - Community.Debezium

    Debezium is an open source project,
    and we welcome everyone that wants to help to make
    it the best open source platform for change data capture.


    "How can I help?"

    You can contribute in multiple ways: by using Debezium, asking or answering questions, reporting issues, writing documentation, fixing bugs, discussing plans, and developing new features. Set up your local environment, star our GitHub repositories, and check out our open issues.

    Learn how

    "Where can I ask questions?"

    Our documentation is full of useful information, and you might find an answer to your question there. If not, jump into our chat room for users or join our Google Group. Our user chat room may be useful if you're interested in the in-depth inner workings of Debezium.

    "I'm using Debezium!"

    We love to hear how people are using Debezium, so let us know on Twitter @debezium or mention it in the Google Group. Include a link to your project, website, or blog post. We'd also be very happy to add you to our growing list of Debezium production users.

    \ No newline at end of file + Community.Debezium

    Debezium is an open source project,
    and we welcome everyone that wants to help to make
    it the best open source platform for change data capture.


    "How can I help?"

    You can contribute in multiple ways: by using Debezium, asking or answering questions, reporting issues, writing documentation, fixing bugs, discussing plans, and developing new features. Set up your local environment, star our GitHub repositories, and check out our open issues.

    Learn how

    "Where can I ask questions?"

    Our documentation is full of useful information, and you might find an answer to your question there. If not, jump into our chat room for users or join our Google Group. Our user chat room may be useful if you're interested in the in-depth inner workings of Debezium.

    "I'm using Debezium!"

    We love to hear how people are using Debezium, so let us know on Twitter @debezium or mention it in the Google Group. Include a link to your project, website, or blog post. We'd also be very happy to add you to our growing list of Debezium production users.

    \ No newline at end of file diff --git a/community/users/index.html b/community/users/index.html index 7361ea600c..a5aa2c5254 100644 --- a/community/users/index.html +++ b/community/users/index.html @@ -1 +1 @@ - Who's Using Debezium?

    Who's Using Debezium?

    Debezium is used in production by a wide range of companies and organizations. This list contains users of Debezium who agreed to serve as public reference; where available, further resources with more details are linked.

    If your organization would like to be added to (or removed from) this list, please send a pull request for updating the source of this page (please keep the list in alphabetic order).

    • Airwallex

    • Alza

    • Auto Trader UK

    • Behalf (details)

    • Bajaj Finserv Health

    • Bolt (details)

    • Convoy (details)

    • Delhivery (details)

    • DeviantArt (more details)

    • EOS Technology Solutions

    • Experience

    • Flipkart

    • Fampay

    • Getir

    • GoHealth

    • Gorgias

    • Hepsiburada

    • Hipages

    • JW Player (details)

    • Kenshoo

    • Klabin S.A.

    • Lendingkart Tech

    • MEGOGO

    • Myntra (details)

    • Okta (Auth0) (details)

    • OYO

    • Pipedrive

    • Reddit (details)

    • Segment (used with ctlstore)

    • Shippeo (details)

    • Shopify

    • SquadStack

    • Strava

    • SugarCRM

    • Synaltic (details in French)

    • Tokopedia

    • Traveloka

    • Trendyol (details)

    • TrueAccord

    • Ubisoft

    • Usabilla by Surveymonkey

    • Via Varejo (details)

    • Vimeo

    • WePay, Inc. (details, more details)

    • Zalando

    • Zepto

    • Zomato

    • ZoloStays

    • …​ and you? Then let us know and get added to the list, too. Thanks!

    \ No newline at end of file + Who's Using Debezium?

    Who's Using Debezium?

    Debezium is used in production by a wide range of companies and organizations. This list contains users of Debezium who agreed to serve as public reference; where available, further resources with more details are linked.

    If your organization would like to be added to (or removed from) this list, please send a pull request for updating the source of this page (please keep the list in alphabetic order).

    • Airwallex

    • Alza

    • Auto Trader UK

    • Behalf (details)

    • Bajaj Finserv Health

    • Bolt (details)

    • Convoy (details)

    • Delhivery (details)

    • DeviantArt (more details)

    • EOS Technology Solutions

    • Experience

    • Flipkart

    • Fampay

    • Getir

    • GoHealth

    • Gorgias

    • Hepsiburada

    • Hipages

    • JW Player (details)

    • Kenshoo

    • Klabin S.A.

    • Lendingkart Tech

    • MEGOGO

    • Myntra (details)

    • Okta (Auth0) (details)

    • OYO

    • Pipedrive

    • Reddit (details)

    • Segment (used with ctlstore)

    • Shippeo (details)

    • Shopify

    • SquadStack

    • Strava

    • SugarCRM

    • Synaltic (details in French)

    • Tokopedia

    • Traveloka

    • Trendyol (details)

    • TrueAccord

    • Ubisoft

    • Usabilla by Surveymonkey

    • Via Varejo (details)

    • Vimeo

    • WePay, Inc. (details, more details)

    • Zalando

    • Zepto

    • Zomato

    • ZoloStays

    • …​ and you? Then let us know and get added to the list, too. Thanks!

    \ No newline at end of file diff --git a/docs/amq-streams/index.html b/docs/amq-streams/index.html index d070054113..56d7a9af63 100644 --- a/docs/amq-streams/index.html +++ b/docs/amq-streams/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/architecture/index.html b/docs/architecture/index.html index 4c26e78427..d9c1b13e06 100644 --- a/docs/architecture/index.html +++ b/docs/architecture/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/code-of-conduct/index.html b/docs/code-of-conduct/index.html index 81399dee8d..8faa3f5b23 100644 --- a/docs/code-of-conduct/index.html +++ b/docs/code-of-conduct/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/avro/index.html b/docs/configuration/avro/index.html index c32fcb58ad..847ffe9be2 100644 --- a/docs/configuration/avro/index.html +++ b/docs/configuration/avro/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/event-flattening/index.html b/docs/configuration/event-flattening/index.html index c0121d1705..4e64c2ef9a 100644 --- a/docs/configuration/event-flattening/index.html +++ b/docs/configuration/event-flattening/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/logging/index.html b/docs/configuration/logging/index.html index 4d758926db..fa1199e5bc 100644 --- a/docs/configuration/logging/index.html +++ b/docs/configuration/logging/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/mongodb-event-flattening/index.html b/docs/configuration/mongodb-event-flattening/index.html index 415557bb8b..416a15c5fd 100644 --- a/docs/configuration/mongodb-event-flattening/index.html +++ b/docs/configuration/mongodb-event-flattening/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/outbox-event-router/index.html b/docs/configuration/outbox-event-router/index.html index d12a9c8cf0..d28fa95210 100644 --- a/docs/configuration/outbox-event-router/index.html +++ b/docs/configuration/outbox-event-router/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/configuration/topic-routing/index.html b/docs/configuration/topic-routing/index.html index 39e68b269f..28e4feb859 100644 --- a/docs/configuration/topic-routing/index.html +++ b/docs/configuration/topic-routing/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/cassandra/index.html b/docs/connectors/cassandra/index.html index ab87f0d865..bfd287b835 100644 --- a/docs/connectors/cassandra/index.html +++ b/docs/connectors/cassandra/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/index.html b/docs/connectors/index.html index 63a62ebcea..8593a4d82a 100644 --- a/docs/connectors/index.html +++ b/docs/connectors/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/mongodb/index.html b/docs/connectors/mongodb/index.html index aa3d74a023..6b89208a64 100644 --- a/docs/connectors/mongodb/index.html +++ b/docs/connectors/mongodb/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/mysql/index.html b/docs/connectors/mysql/index.html index 30ef905dba..2a04e9c5d6 100644 --- a/docs/connectors/mysql/index.html +++ b/docs/connectors/mysql/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/oracle/index.html b/docs/connectors/oracle/index.html index a4c96676a4..7ce900417f 100644 --- a/docs/connectors/oracle/index.html +++ b/docs/connectors/oracle/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/postgresql/index.html b/docs/connectors/postgresql/index.html index 85c4c06564..50d8a843d2 100644 --- a/docs/connectors/postgresql/index.html +++ b/docs/connectors/postgresql/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/connectors/sqlserver/index.html b/docs/connectors/sqlserver/index.html index 70d85f3aa4..a8a4243395 100644 --- a/docs/connectors/sqlserver/index.html +++ b/docs/connectors/sqlserver/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/contribute/index.html b/docs/contribute/index.html index c2c5e6f833..9abc1a4e1a 100644 --- a/docs/contribute/index.html +++ b/docs/contribute/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/embedded/index.html b/docs/embedded/index.html index 70dd838714..743152bbea 100644 --- a/docs/embedded/index.html +++ b/docs/embedded/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/faq/index.html b/docs/faq/index.html index 4f861058e6..45e7237dba 100644 --- a/docs/faq/index.html +++ b/docs/faq/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/features/index.html b/docs/features/index.html index 25335d48d7..b93f02ca32 100644 --- a/docs/features/index.html +++ b/docs/features/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index b11dccac2b..350dc1d18a 100644 --- a/docs/index.html +++ b/docs/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/install/development/index.html b/docs/install/development/index.html index 5c86165640..dd75824bf3 100644 --- a/docs/install/development/index.html +++ b/docs/install/development/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/install/postgres-plugins/index.html b/docs/install/postgres-plugins/index.html index d424e39d3b..d6e4355ddf 100644 --- a/docs/install/postgres-plugins/index.html +++ b/docs/install/postgres-plugins/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/install/stable/index.html b/docs/install/stable/index.html index 5c86165640..dd75824bf3 100644 --- a/docs/install/stable/index.html +++ b/docs/install/stable/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/monitoring/index.html b/docs/monitoring/index.html index a4aff42ee3..363d5df8af 100644 --- a/docs/monitoring/index.html +++ b/docs/monitoring/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/mysql/index.html b/docs/mysql/index.html index 30ef905dba..2a04e9c5d6 100644 --- a/docs/mysql/index.html +++ b/docs/mysql/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/online-resources/index.html b/docs/online-resources/index.html index 622e5f7bf0..871e22ade1 100644 --- a/docs/online-resources/index.html +++ b/docs/online-resources/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/openshift/index.html b/docs/openshift/index.html index d69be00cd3..1ae3150414 100644 --- a/docs/openshift/index.html +++ b/docs/openshift/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/releases/index.html b/docs/releases/index.html index 47979d4bc5..9a075fb80b 100644 --- a/docs/releases/index.html +++ b/docs/releases/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/roadmap/index.html b/docs/roadmap/index.html index ad50760ada..1b37261ed0 100644 --- a/docs/roadmap/index.html +++ b/docs/roadmap/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/docs/tutorial/index.html b/docs/tutorial/index.html index b0a2eda062..a4e943e3c6 100644 --- a/docs/tutorial/index.html +++ b/docs/tutorial/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/documentation/architecture/index.html b/documentation/architecture/index.html index 27c367ae18..380ba11a47 100644 --- a/documentation/architecture/index.html +++ b/documentation/architecture/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/documentation/faq/index.html b/documentation/faq/index.html index d28b3466cb..75f33ae866 100644 --- a/documentation/faq/index.html +++ b/documentation/faq/index.html @@ -32,4 +32,4 @@ ... </transformers> ... -</configuration>

    Alternatively, if you use the Maven Assembly plug-in, you can use the metaInf-services container descriptor handlers.

    \ No newline at end of file +</configuration>

    Alternatively, if you use the Maven Assembly plug-in, you can use the metaInf-services container descriptor handlers.

    \ No newline at end of file diff --git a/documentation/features/index.html b/documentation/features/index.html index eb3e900e07..b9aba29ada 100644 --- a/documentation/features/index.html +++ b/documentation/features/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/documentation/index.html b/documentation/index.html index 904e254572..8c57f68b11 100644 --- a/documentation/index.html +++ b/documentation/index.html @@ -1 +1 @@ - Reference Documentation

    Reference Documentation

    Series

    stable

    3.0

    2024-12-18
    Snapshot field enumeration in source block provides additional cases; Signals in-progress are not reprocessed upon connector restart; SQL Server is tested with transparent data encryption; Added support for PostgreSQL 17 failover slots; JDBC offset/history can be configured in Operator CRD; Support for ad-hoc snapshot for tables whose schema was not captured

    stable

    2.7

    2024-12-11
    Oracle connector DML parser can ignore errors; Int/bigint arrays are supported in reselect postprocessor; MongoDB metrics are aligned with relational connectors

    Other Documentation

    Make sure to check the FAQ. You can also find quite a lot of valuable insight about Debezium all across the web. We have compiled a list of such articles here.
    \ No newline at end of file + Reference Documentation

    Reference Documentation

    Series

    stable

    3.0

    2024-12-19
    Revert MySQL/MariaDB grammar refactorings

    stable

    2.7

    2024-12-11
    Oracle connector DML parser can ignore errors; Int/bigint arrays are supported in reselect postprocessor; MongoDB metrics are aligned with relational connectors

    Other Documentation

    Make sure to check the FAQ. You can also find quite a lot of valuable insight about Debezium all across the web. We have compiled a list of such articles here.
    \ No newline at end of file diff --git a/documentation/install/stable/index.html b/documentation/install/stable/index.html index 6bb3fb3339..5d572b704a 100644 --- a/documentation/install/stable/index.html +++ b/documentation/install/stable/index.html @@ -1 +1 @@ - Redirecting...
    \ No newline at end of file + Redirecting...
    \ No newline at end of file diff --git a/documentation/online-resources/index.html b/documentation/online-resources/index.html index 68f3666e6b..344ed05aed 100644 --- a/documentation/online-resources/index.html +++ b/documentation/online-resources/index.html @@ -1 +1 @@ - Resources on the Web

    Resources on the Web

    A compilation of blog posts, slide sets, recordings and other online resources around Debezium. Most of the resources are in English; you can find a collection of resources in other languages like Portuguese or French towards the end of this page.

    You’ve written or spoken about Debezium and would like to have your post or talk listed here? That’s great, let us know by sharing the link in our forum. Or better yet, just add the link to the source of this page yourself and send a pull request against the debezium.github.io repo. Thanks!

    Presentations, Session Recordings and Videos

    Blog Posts & Articles

    Non-English Resources

    🇪🇸 Spanish

    🇩🇪 German

    🇧🇷 Portuguese

    🌏 Other

    \ No newline at end of file + Resources on the Web

    Resources on the Web

    A compilation of blog posts, slide sets, recordings and other online resources around Debezium. Most of the resources are in English; you can find a collection of resources in other languages like Portuguese or French towards the end of this page.

    You’ve written or spoken about Debezium and would like to have your post or talk listed here? That’s great, let us know by sharing the link in our forum. Or better yet, just add the link to the source of this page yourself and send a pull request against the debezium.github.io repo. Thanks!

    Presentations, Session Recordings and Videos

    Blog Posts & Articles

    Non-English Resources

    🇪🇸 Spanish

    🇩🇪 German

    🇧🇷 Portuguese

    🌏 Other

    \ No newline at end of file diff --git a/documentation/sitemap.xml b/documentation/sitemap.xml index f6e2711460..dbed7a9248 100644 --- a/documentation/sitemap.xml +++ b/documentation/sitemap.xml @@ -2,2262 +2,2262 @@ https://debezium.io/documentation/reference/stable/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/index-sink.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/informix.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/mariadb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/stable/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/post-processors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/post-processors/reselect-columns.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/convert-cloudevent-to-saveable-form.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/decode-logical-decoding-message-content.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/partition-routing.html 
-2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/vitess-filter-transaction-topic-records.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/vitess-remove-field.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/transformations/vitess-use-local-vgtid.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/stable/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/index-sink.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/informix.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/mariadb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/post-processors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/post-processors/reselect-columns.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/convert-cloudevent-to-saveable-form.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/decode-logical-decoding-message-content.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/vitess-filter-transaction-topic-records.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/vitess-remove-field.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/transformations/vitess-use-local-vgtid.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/index-sink.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/informix.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/mariadb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.7/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/post-processors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/post-processors/reselect-columns.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/convert-cloudevent-to-saveable-form.html -2024-12-19T14:44:55.428Z 
+2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/vitess-filter-transaction-topic-records.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/vitess-remove-field.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/transformations/vitess-use-local-vgtid.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.7/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/informix.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.6/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/post-processors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/post-processors/reselect-columns.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.6/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/convert-cloudevent-to-saveable-form.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.6/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/informix.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.5/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/post-processors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/post-processors/reselect-columns.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.5/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/convert-cloudevent-to-saveable-form.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.5/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.4/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/compute-partition.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.4/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/schema-change-event-filter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/timescaledb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/timezone-converter.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.4/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/configuration/notification.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/oracle.html -2024-12-19T14:44:55.428Z 
+2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/compute-partition.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/event-flattening.html 
-2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.3/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/jdbc.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.2/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/compute-partition.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/header-to-value.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.2/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.2/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/spanner.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.1/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/event-changes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/partition-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.1/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.0/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/2.0/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/2.0/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/architecture.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/configuration/avro.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/configuration/signalling.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/configuration/topic-auto-create-config.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/cassandra.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/db2.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/mongodb.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/mysql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/oracle.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/postgresql.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/sqlserver.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/connectors/vitess.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/development/converters.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/development/engine.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/docker.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/features.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/install.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/integrations/cloudevents.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/integrations/outbox.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z 
https://debezium.io/documentation/reference/1.9/integrations/serdes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/integrations/testcontainers.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/integrations/tracing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/debezium-server.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/debezium-ui.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/embedded.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/kubernetes.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/logging.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/monitoring.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/operations/openshift.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/postgres-plugins.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/applying-transformations-selectively.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/content-based-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/filtering.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/index.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/mongodb-event-flattening.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/mongodb-outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/outbox-event-router.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/transformations/topic-routing.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z https://debezium.io/documentation/reference/1.9/tutorial.html -2024-12-19T14:44:55.428Z +2024-12-19T14:44:56.854Z diff --git a/feed.xml b/feed.xml index fd8684ed49..72247c0103 100644 --- a/feed.xml +++ b/feed.xml @@ -1,4 +1,4 @@ -Jekyll2024-12-19T14:44:58+00:00https://debezium.io/feed.xmlDebeziumDebezium is an open source distributed platform for change data capture. Start it up, point it at your databases, and your apps can start responding to all of the inserts, updates, and deletes that other apps commit to your databases. Debezium is durable and fast, so your apps can respond quickly and never miss an event, even when things go wrong.Debezium 3.0.5.Final Released2024-12-18T00:00:00+00:002024-12-18T00:00:00+00:00https://debezium.io/blog/2024/12/18/debezium-3-0-5-final-released

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…

    Breaking Changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Kafka Signal Source

    Debezium was reprocessing Kafka-based signals on connector restarts, which could introduce unpredictable behavior with unintended side effects. As a result, this feature has been removed, and if a connector stops, then the signal must be re-sent (DBZ-7856).
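
    For illustration, re-sending a signal simply means producing a new record to the configured signal topic. A minimal sketch of such a record is shown below, assuming the standard Kafka signal channel where signal.kafka.topic points at the signal topic and the record key must match the connector’s topic.prefix; the connector name, topic prefix, and table name here are placeholders, not values from this release:

        key:   "inventory-connector"
        value: {
                 "type": "execute-snapshot",
                 "data": {
                   "data-collections": ["inventory.orders"],
                   "type": "incremental"
                 }
               }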

    Change Event Source Info Block

    The snapshot enumeration in the source information block in Debezium’s change events was extended to include all possible cases (DBZ-8496).

    New features and improvements

Debezium 3.0.5.Final introduces a number of improvements and features; let’s take a look at each individually.

    Core

    Allow ad-hoc blocking snapshots on unknown tables

While we recommend using the default configuration for schema management for all tables, we understand that isn’t always possible in every environment. A common question we often see is how to snapshot newly added tables, particularly when the connector is configured with store.only.captured.tables.ddl set to true.

In this update, we have simplified the process: update your connector configuration with the new table, and once the connector task has restarted, an ad-hoc blocking snapshot signal will be permitted even on tables that are unknown to the connector but match the capture filters (DBZ-4903).
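
For illustration, here is a minimal sketch of sending such a signal through the connector’s signaling table. It assumes a PostgreSQL source, a signaling table named debezium_signal registered via signal.data.collection, and a hypothetical newly added table public.new_table; adjust the names and connection details to your environment.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class SendBlockingSnapshotSignal {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details for this sketch.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/inventory", "debezium", "secret");
             PreparedStatement insert = conn.prepareStatement(
                     "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            insert.setString(1, "adhoc-blocking-1");
            insert.setString(2, "execute-snapshot");
            // "type": "blocking" requests a blocking (rather than incremental) snapshot of the
            // newly added table, which only needs to match the connector's capture filters.
            insert.setString(3,
                    "{\"data-collections\": [\"public.new_table\"], \"type\": \"blocking\"}");
            insert.executeUpdate();
        }
    }
}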

    Snapshot dispatch failure handling improvements

    When processing change events, users can control how the connector reacts to specific event handling failures using event.processing.failure.handling.mode; however, this has traditionally only been applicable for streaming changes.

    There are some corner cases where emitting snapshot events could also raise failures. To improve this experience, the event.processing.failure.handling.mode now influences failures when dispatching snapshot events, too (DBZ-8433).
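
As a quick, hedged sketch of what this looks like in a connector configuration (the connector class and name are placeholders; only the last property is the subject here):

import java.util.Properties;

public class FailureHandlingConfigSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("name", "inventory-connector");
        config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        // "fail" (the default) stops the connector, "warn" logs the problematic event and continues,
        // and "skip" drops it; as of 3.0.5.Final this also applies while dispatching snapshot events.
        config.setProperty("event.processing.failure.handling.mode", "warn");
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}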

    Connector startup configuration logging improved

When a Debezium source connector starts, it logs all connector configurations. This is very helpful both for the team when diagnosing problems and for users to verify that their configuration is being understood properly.

In this update, the logging of the connector configuration has changed slightly. Rather than writing each configuration property as a separate log entry, the key/value pairs are now joined with new lines and added to the log as a single entry. This makes it easier to identify these log lines and for tooling to exclude such entries if needed (DBZ-8472).

    Postgres

    Support for PostgreSQL 17 fail-over replication slots

PostgreSQL 17 introduces a long-awaited feature: fail-over for replication slots.

When the replication slot is created, a new property can be specified so that the slot is also created and available on the fail-over replica. The synchronization of the fail-over replication slot can be performed manually by calling pg_sync_replication_slots() or automatically by enabling the sync_replication_slots feature.

    When automatic synchronization is enabled, this allows Debezium to consume from that slot immediately on fail-over to the replica and not miss any events.

To take advantage of this new feature, your connector must be configured with slot.failover set to true. Debezium must be connected to PostgreSQL 17+ and to the primary of the cluster; otherwise, no fail-over replication slot will be created (DBZ-8412).
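
A short sketch of the moving pieces, assuming a PostgreSQL 17 primary/standby pair with placeholder connection details: the connector itself only needs slot.failover set to true (and must point at the primary), while the standby can be synchronized manually as shown here, or automatically via the sync_replication_slots setting.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SyncFailoverSlotSketch {
    public static void main(String[] args) throws Exception {
        // Connector side (conceptually): slot.failover=true, connecting to the PostgreSQL 17+ primary.
        // Standby side: trigger a manual synchronization of the fail-over replication slots.
        try (Connection standby = DriverManager.getConnection(
                "jdbc:postgresql://standby.example.com:5432/postgres", "postgres", "secret");
             Statement stmt = standby.createStatement()) {
            stmt.execute("SELECT pg_sync_replication_slots()");
        }
    }
}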

    Oracle

    New metrics to track partial rollback events

A partial rollback event is unique to Oracle: a user performs an operation, it is recorded in the transaction logs as having been performed, but a validation check then forces the operation to be undone and rolled back. One of the most notable triggers of partial rollbacks is a constraint violation.

    The new JMX metric, NumberOfPartialRollbackCount, tracks the frequency of this event sequence in the transaction logs (DBZ-8491).

Increases in this metric should be rare, and a few occurrences throughout the day should pose no major problem.

If you notice this metric increasing frequently within a small window of time, you may have a poorly written script or job that relies on constraint violations for part of its logic. While the connector can handle these use cases, it’s important to understand that they create unnecessary transaction log activity, which can directly impact the speed and latency of streaming changes.
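
The metric can be watched like any other Debezium streaming metric over JMX. A sketch, assuming a reachable JMX endpoint and that the streaming MBean follows the usual debezium.oracle:type=connector-metrics,context=streaming,server=<topic.prefix> naming; the endpoint and server name below are placeholders.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class PartialRollbackMetricSketch {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://localhost:9010/jmxrmi"); // placeholder endpoint
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection mbeans = connector.getMBeanServerConnection();
            // Placeholder server name; in practice it matches the connector's topic.prefix.
            ObjectName streamingMetrics = new ObjectName(
                    "debezium.oracle:type=connector-metrics,context=streaming,server=server1");
            Object count = mbeans.getAttribute(streamingMetrics, "NumberOfPartialRollbackCount");
            System.out.println("Partial rollbacks observed: " + count);
        } finally {
            connector.close();
        }
    }
}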

    Skip GoldenGate replication markers

    When using Debezium to capture changes from an Oracle database that interacts with Oracle GoldenGate, you may have observed some situations where the low watermark in the offsets did not advance across restarts. This was especially the case when setting lob.enabled to true.

    In this update, the Debezium Oracle connector will now skip GoldenGate’s replication marker events, which will prevent those synthetic transactions from cluttering the transaction buffer and forcing users to rely on transaction retention (DBZ-8533).

    Vitess

    Unparseable DDL handling improvements

    Several new improvements have been added to Vitess to make the parsing and handling of DDL changes more fault-tolerant (DBZ-8479). These improvements include:

• Only parse DDL events if that specific table is tracked in the schema history

• Strip comments to mitigate parse failures

• Make table and type resolution fault tolerant to gracefully handle edge cases

    K8s Operator

    Support JDBC offset/history configurations DBZ-8501

    We have improved the Kubernetes Operator by introducing support for the JDBC storage module, allowing it to be configured via the CRD.

    In the offset schema reference, a new property section called jdbc has been added, which describes the JDBC offset backing store properties. In the schema history reference, a new property section called jdbc has been added, which describes the JDBC schema history store properties.

This makes it easy to store the offsets and schema history of a connector deployed with the Debezium Operator in a JDBC data store (DBZ-8501).

    Other fixes

    In total there were 43 issues resolved in Debezium 3.0.5.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Object ID cache may fail with concurrent modification exception DBZ-8465

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

    • Edit Source/Destination on adding new configuration properties its removing old once DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    ]]>
    Chris Cranford
    Debezium 3.0.2.Final Released2024-11-18T00:00:00+00:002024-11-18T00:00:00+00:00https://debezium.io/blog/2024/11/18/debezium-3-0-2-final-released

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    New features and improvements

Debezium 3.0.2.Final introduces a number of improvements and features; let’s take a look at each individually.

    Core

    Perform blocking snapshots with never snapshot mode

The Debezium blocking snapshot process is designed to execute the initial snapshot based on the signal provided, selectively emitting the historical data for one or more tables. When this was paired with the never snapshot mode, it led to unexpected behavior.

    In this release, we modified the connector offsets to track the configured snapshot.mode, allowing the blocking snapshot to succeed and perform the initial snapshot when signaled, even if the snapshot.mode is configured to never perform a snapshot. This allows users to safely use this feature with this configuration (DBZ-7903).

    Due to the connector offset storage change, once the connector is upgraded to 3.0.2.Final or later, the connector cannot be downgraded to 3.0.1.Final or earlier.
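
A hedged sketch of a configuration that combines the two; the connector class and table names are placeholders, and only snapshot.mode and the signaling table property matter here.

import java.util.Properties;

public class NeverModeBlockingSnapshotSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        // No snapshot is taken at startup...
        config.setProperty("snapshot.mode", "never");
        // ...but with a signaling table registered, an execute-snapshot signal with
        // "type": "blocking" can still trigger one on demand (DBZ-7903).
        config.setProperty("signal.data.collection", "public.debezium_signal");
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}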

    MongoDB

    RowsChanged JMX metric type changed

In previous builds of the MongoDB connector, the RowsChanged JMX metric was exposed as a java.util.Map, which contradicted the same JMX metric on relational connectors, where it is exposed as TabularData. This has been fixed in 3.0.2.Final: the JMX metric now uses TabularData across all connector implementations for uniformity (DBZ-8359).

    Any existing MongoDB JMX pipelines may need to be adjusted if you were previously capturing RowsChanged.
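
If you read RowsChanged programmatically, the attribute now arrives as TabularData everywhere. A sketch of consuming it in-process, with a placeholder MBean name; the exact server and task keys depend on your deployment, and a remote JMX connection works the same way.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.TabularData;

public class RowsChangedMetricSketch {
    public static void main(String[] args) throws Exception {
        MBeanServer mbeans = ManagementFactory.getPlatformMBeanServer();
        // Placeholder ObjectName; adjust the server/task keys to match your connector.
        ObjectName streamingMetrics = new ObjectName(
                "debezium.mongodb:type=connector-metrics,context=streaming,server=mongo1,task=0");
        TabularData rowsChanged = (TabularData) mbeans.getAttribute(streamingMetrics, "RowsChanged");
        for (Object row : rowsChanged.values()) {
            // Each row is a CompositeData entry describing the change count for one collection.
            System.out.println((CompositeData) row);
        }
    }
}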

    Oracle

    Higher precision with timestamps

Debezium for Oracle has traditionally emitted column timestamp values with millisecond precision, controlled by the NLS session properties set on the mining session connection. The precision has been improved and now provides nanosecond-based (aka FF9) values (DBZ-8379).

The emitted field type is based on the column’s data type, so emitted field data types remain unchanged. What changes are cases where columns hold micro- or nanosecond-based values: where these were previously zero, they will now be non-zero.

    Warn or skip DML exceptions

The event.processing.failure.handling.mode can be configured to fail, warn, or skip specific connector error conditions to improve connector resilience to various data issues. This configuration has historically been used to control how the Oracle connector behaves when a DDL failure is observed.

    In this release, the event.processing.failure.handling.mode is also used to control failures for DML-based events. If there was an issue with the Oracle connector parsing your insert, update, or delete operations, you can safely configure the connector to fail, warn, or skip the DML event based on your needs (DBZ-8208).

The default behavior is to always fail when an event cannot be safely handled by the connector. By adjusting this to warn or skip, the connector will safely continue past the failed event, but you will introduce data loss that will need to be addressed manually.

    Vitess

    Performance improvements

In earlier builds of the Debezium for Vitess connector, the connector used a regular expression-based filter system that matched all tables based on a prefix with varying suffixes, with exclusions applied later based on the configuration. This has the potential to waste CPU and create hotspots, because intermediate objects are created for events that will later be filtered out and garbage collected.

In this release, we’ve improved the way the Vitess connector handles this use case by applying the filtering earlier in the event processing chain. This should reduce the number of intermediate objects created and improve the overall performance of the connector. For keyspaces that have the same prefix and differing suffixes, this should provide better overall performance than older builds (DBZ-8354).

    Sink connectors

    Debezium 0.x introduced a common source-connector framework that has become the foundation for source connectors provided by the project, including our community-led connectors such as Spanner, Vitess, and others. With the introduction of the MongoDB sink connector recently, our long-term goal is to approach sink connectors in a similar way, providing a common sink-connector framework to ease the creation of Debezium-based sink connectors.

Over the Debezium 3.x lifecycle, you will see incremental steps to streamline the source code shared by the JDBC and MongoDB sink connectors. We will minimize disruptions in maintenance releases as you have come to expect, but expect future major and minor releases to introduce deprecations and changes to support this endeavor.

    In this first round of changes, we’ve introduced a new Debezium module: debezium-sink. This module acts as the foundation for all sink connectors and is home to a variety of common classes, including the SinkConnectorConfig class, naming strategy implementations, and the common representation of a sink record, DebeziumSinkRecord.

    As we continue to streamline the MongoDB and JDBC sink connectors, additional common behavior will be added.

    JDBC sink connector changes

With the sink module using the naming convention of collection rather than table, several configuration properties have been deprecated and replaced; the sketch after the list below shows the new names in context. The old properties will continue to work in Debezium 3.0.x builds; however, they will be removed in Debezium 3.1.

    • The table.name.format property is replaced by collection.name.format.

    • The table.naming.strategy property is replaced by collection.naming.strategy.
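
A brief configuration sketch with the new property names; the connector class is the JDBC sink’s, the values are illustrative, and the strategy class name is a hypothetical custom implementation rather than a documented default.

import java.util.Properties;

public class JdbcSinkNamingSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("connector.class", "io.debezium.connector.jdbc.JdbcSinkConnector");
        // New names as of 3.0.2; the table.* equivalents keep working in 3.0.x but go away in 3.1.
        config.setProperty("collection.name.format", "${topic}");      // replaces table.name.format
        config.setProperty("collection.naming.strategy",               // replaces table.naming.strategy
                "com.example.MyCollectionNamingStrategy");             // hypothetical custom implementation
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}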

    In addition, the contract for io.debezium.connector.jdbc.naming.TableNamingStrategy specified by the table.naming.strategy property is deprecated. A new io.debezium.sink.naming.CollectionNamingStrategy has been introduced with a slightly different signature.

    TableNamingStrategy contract
        /**
    +Jekyll2024-12-19T14:44:59+00:00https://debezium.io/feed.xmlDebeziumDebezium is an open source distributed platform for change data capture. Start it up, point it at your databases, and your apps can start responding to all of the inserts, updates, and deletes that other apps commit to your databases. Debezium is durable and fast, so your apps can respond quickly and never miss an event, even when things go wrong.Debezium 3.0.5.Final Released2024-12-18T00:00:00+00:002024-12-18T00:00:00+00:00https://debezium.io/blog/2024/12/18/debezium-3-0-5-final-released 

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    Breaking Changes

    The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    Kafka Signal Source

    Debezium was reprocessing Kafka-based signals on connector restarts, which could introduce unpredictable behavior with unintended side effects. As a result, this feature has been removed, and if a connector stops, then the signal must be re-sent (DBZ-7856).

    Change Event Source Info Block

    The snapshot enumeration in the source information block in Debezium’s change events was extended to include all possible cases (DBZ-8496).

    New features and improvements

Debezium 3.0.5.Final introduces a number of improvements and features; let’s take a look at each individually.

    Core

    Allow ad-hoc blocking snapshots on unknown tables

While we recommend using the default configuration for schema management for all tables, we understand that isn’t always possible in every environment. A common question we often see is how to snapshot newly added tables, particularly when the connector is configured with store.only.captured.tables.ddl set to true.

In this update, we have simplified the process: update your connector configuration with the new table, and once the connector task has restarted, an ad-hoc blocking snapshot signal will be permitted even on tables that are unknown to the connector but match the capture filters (DBZ-4903).

    Snapshot dispatch failure handling improvements

    When processing change events, users can control how the connector reacts to specific event handling failures using event.processing.failure.handling.mode; however, this has traditionally only been applicable for streaming changes.

    There are some corner cases where emitting snapshot events could also raise failures. To improve this experience, the event.processing.failure.handling.mode now influences failures when dispatching snapshot events, too (DBZ-8433).

    Connector startup configuration logging improved

When a Debezium source connector starts, it logs all connector configurations. This is very helpful both for the team when diagnosing problems and for users to verify that their configuration is being understood properly.

In this update, the logging of the connector configuration has changed slightly. Rather than writing each configuration property as a separate log entry, the key/value pairs are now joined with new lines and added to the log as a single entry. This makes it easier to identify these log lines and for tooling to exclude such entries if needed (DBZ-8472).

    Postgres

    Support for PostgreSQL 17 fail-over replication slots

PostgreSQL 17 introduces a long-awaited feature: fail-over for replication slots.

When the replication slot is created, a new property can be specified so that the slot is also created and available on the fail-over replica. The synchronization of the fail-over replication slot can be performed manually by calling pg_sync_replication_slots() or automatically by enabling the sync_replication_slots feature.

    When automatic synchronization is enabled, this allows Debezium to consume from that slot immediately on fail-over to the replica and not miss any events.

To take advantage of this new feature, your connector must be configured with slot.failover set to true. Debezium must be connected to PostgreSQL 17+ and to the primary of the cluster; otherwise, no fail-over replication slot will be created (DBZ-8412).

    Oracle

    New metrics to track partial rollback events

A partial rollback event is unique to Oracle: a user performs an operation, it is recorded in the transaction logs as having been performed, but a validation check then forces the operation to be undone and rolled back. One of the most notable triggers of partial rollbacks is a constraint violation.

    The new JMX metric, NumberOfPartialRollbackCount, tracks the frequency of this event sequence in the transaction logs (DBZ-8491).

Increases in this metric should be rare, and a few occurrences throughout the day should pose no major problem.

If you notice this metric increasing frequently within a small window of time, you may have a poorly written script or job that relies on constraint violations for part of its logic. While the connector can handle these use cases, it’s important to understand that they create unnecessary transaction log activity, which can directly impact the speed and latency of streaming changes.

    Skip GoldenGate replication markers

    When using Debezium to capture changes from an Oracle database that interacts with Oracle GoldenGate, you may have observed some situations where the low watermark in the offsets did not advance across restarts. This was especially the case when setting lob.enabled to true.

    In this update, the Debezium Oracle connector will now skip GoldenGate’s replication marker events, which will prevent those synthetic transactions from cluttering the transaction buffer and forcing users to rely on transaction retention (DBZ-8533).

    Vitess

    Unparseable DDL handling improvements

    Several new improvements have been added to Vitess to make the parsing and handling of DDL changes more fault-tolerant (DBZ-8479). These improvements include:

• Only parse DDL events if that specific table is tracked in the schema history

• Strip comments to mitigate parse failures

• Make table and type resolution fault tolerant to gracefully handle edge cases

    K8s Operator

    Support JDBC offset/history configurations DBZ-8501

    We have improved the Kubernetes Operator by introducing support for the JDBC storage module, allowing it to be configured via the CRD.

    In the offset schema reference, a new property section called jdbc has been added, which describes the JDBC offset backing store properties. In the schema history reference, a new property section called jdbc has been added, which describes the JDBC schema history store properties.

This makes it easy to store the offsets and schema history of a connector deployed with the Debezium Operator in a JDBC data store (DBZ-8501).

    Other fixes

    In total there were 43 issues resolved in Debezium 3.0.5.Final. The list of changes can also be found in our release notes.

    Here are some noteworthy changes:

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Object ID cache may fail with concurrent modification exception DBZ-8465

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

    • Edit Source/Destination on adding new configuration properties its removing old once DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    ]]>Chris CranfordDebezium 3.0.2.Final Released2024-11-18T00:00:00+00:002024-11-18T00:00:00+00:00https://debezium.io/blog/2024/11/18/debezium-3-0-2-final-released

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    New features and improvements

Debezium 3.0.2.Final introduces a number of improvements and features; let’s take a look at each individually.

    Core

    Perform blocking snapshots with never snapshot mode

The Debezium blocking snapshot process is designed to execute the initial snapshot based on the signal provided, selectively emitting the historical data for one or more tables. When this was paired with the never snapshot mode, it led to unexpected behavior.

    In this release, we modified the connector offsets to track the configured snapshot.mode, allowing the blocking snapshot to succeed and perform the initial snapshot when signaled, even if the snapshot.mode is configured to never perform a snapshot. This allows users to safely use this feature with this configuration (DBZ-7903).

    Due to the connector offset storage change, once the connector is upgraded to 3.0.2.Final or later, the connector cannot be downgraded to 3.0.1.Final or earlier.

    MongoDB

    RowsChanged JMX metric type changed

In previous builds of the MongoDB connector, the RowsChanged JMX metric was exposed as a java.util.Map, which contradicted the same JMX metric on relational connectors, where it is exposed as TabularData. This has been fixed in 3.0.2.Final: the JMX metric now uses TabularData across all connector implementations for uniformity (DBZ-8359).

    Any existing MongoDB JMX pipelines may need to be adjusted if you were previously capturing RowsChanged.

    Oracle

    Higher precision with timestamps

Debezium for Oracle has traditionally emitted column timestamp values with millisecond precision, controlled by the NLS session properties set on the mining session connection. The precision has been improved and now provides nanosecond-based (aka FF9) values (DBZ-8379).

The emitted field type is based on the column’s data type, so emitted field data types remain unchanged. What changes are cases where columns hold micro- or nanosecond-based values: where these were previously zero, they will now be non-zero.

    Warn or skip DML exceptions

The event.processing.failure.handling.mode can be configured to fail, warn, or skip specific connector error conditions to improve connector resilience to various data issues. This configuration has historically been used to control how the Oracle connector behaves when a DDL failure is observed.

    In this release, the event.processing.failure.handling.mode is also used to control failures for DML-based events. If there was an issue with the Oracle connector parsing your insert, update, or delete operations, you can safely configure the connector to fail, warn, or skip the DML event based on your needs (DBZ-8208).

The default behavior is to always fail when an event cannot be safely handled by the connector. By adjusting this to warn or skip, the connector will safely continue past the failed event, but you will introduce data loss that will need to be addressed manually.

    Vitess

    Performance improvements

In earlier builds of the Debezium for Vitess connector, the connector used a regular expression-based filter system that matched all tables based on a prefix with varying suffixes, with exclusions applied later based on the configuration. This has the potential to waste CPU and create hotspots, because intermediate objects are created for events that will later be filtered out and garbage collected.

In this release, we’ve improved the way the Vitess connector handles this use case by applying the filtering earlier in the event processing chain. This should reduce the number of intermediate objects created and improve the overall performance of the connector. For keyspaces that have the same prefix and differing suffixes, this should provide better overall performance than older builds (DBZ-8354).

    Sink connectors

    Debezium 0.x introduced a common source-connector framework that has become the foundation for source connectors provided by the project, including our community-led connectors such as Spanner, Vitess, and others. With the introduction of the MongoDB sink connector recently, our long-term goal is to approach sink connectors in a similar way, providing a common sink-connector framework to ease the creation of Debezium-based sink connectors.

Over the Debezium 3.x lifecycle, you will see incremental steps to streamline the source code shared by the JDBC and MongoDB sink connectors. We will minimize disruptions in maintenance releases as you have come to expect, but expect future major and minor releases to introduce deprecations and changes to support this endeavor.

    In this first round of changes, we’ve introduced a new Debezium module: debezium-sink. This module acts as the foundation for all sink connectors and is home to a variety of common classes, including the SinkConnectorConfig class, naming strategy implementations, and the common representation of a sink record, DebeziumSinkRecord.

    As we continue to streamline the MongoDB and JDBC sink connectors, additional common behavior will be added.

    JDBC sink connector changes

With the sink module using the naming convention of collection rather than table, several configuration properties have been deprecated and replaced. The old properties will continue to work in Debezium 3.0.x builds; however, they will be removed in Debezium 3.1.

    • The table.name.format property is replaced by collection.name.format.

    • The table.naming.strategy property is replaced by collection.naming.strategy.

    In addition, the contract for io.debezium.connector.jdbc.naming.TableNamingStrategy specified by the table.naming.strategy property is deprecated. A new io.debezium.sink.naming.CollectionNamingStrategy has been introduced with a slightly different signature.

    TableNamingStrategy contract
        /**
          * Resolves the logical table name from the sink record.
          *
          * @param config sink connector configuration, should not be {@code null}
    diff --git a/foundation/faq/index.html b/foundation/faq/index.html
    index 016875dba7..82a4d494dd 100644
    --- a/foundation/faq/index.html
    +++ b/foundation/faq/index.html
    @@ -1 +1 @@
    -   Debezium at Commonhaus FAQ                        

    Debezium at Commonhaus FAQ

    Why move to a foundation?

Moving to a foundation enables us to welcome more contributions from a diverse range of developers and organisations.

Which Debezium projects are moving to a foundation?

    All projects under the Debezium GitHub organization.

    What is Commonhaus?

    Why Commonhaus?

    The main reason for choosing Commonhaus over another foundation was its governance framework, which emphasises project self-governance. See, in particular, its guiding principles.

    Why not move to any other foundation?

    See Commonhaus’ own FAQ.

    What does the move consist of exactly?

    The move consists of donating the Debezium-related intellectual property owned by Red Hat. The donated intellectual property includes in particular:

    • Debezium trademark

    • All related domain names

    This does not include copyright, since Red Hat does not have exclusive claims to copyright on the Debezium code.

    Is Red Hat reducing the funding for Debezium development?

No, not at all.

    How is Red Hat’s involvement in Debezium changing?

    Red Hat’s involvement in Debezium remains unchanged and as committed as ever.

    The Debezium team is expecting increased collaboration between Red Hat and other organizations, which may require setting time aside to onboard new contributors and collaborators.

    What is changing in practice for contributors to the Debezium project?

    Commonhaus requires Debezium to adhere to its policies, including in particular its intellectual property policy, and trademark policy.

    This is great news for you all, as it legally binds the project to remain open-source forever. Commonhaus also requires having a code of conduct, and provides a recommended one, but each project can freely customize the details as needed.

    Debezium contributors can opt to become Commonhaus Members; this is not a requirement to participate in the Debezium project, but is a requirement to help define Commonhaus.

    The Debezium project’s core contributors, internal governance and processes remain the same.

    \ No newline at end of file + Debezium at Commonhaus FAQ

    Debezium at Commonhaus FAQ

    Why move to a foundation?

Moving to a foundation enables us to welcome more contributions from a diverse range of developers and organisations.

Which Debezium projects are moving to a foundation?

    All projects under the Debezium GitHub organization.

    What is Commonhaus?

    Why Commonhaus?

    The main reason for choosing Commonhaus over another foundation was its governance framework, which emphasises project self-governance. See, in particular, its guiding principles.

    Why not move to any other foundation?

    See Commonhaus’ own FAQ.

    What does the move consist of exactly?

    The move consists of donating the Debezium-related intellectual property owned by Red Hat. The donated intellectual property includes in particular:

    • Debezium trademark

    • All related domain names

    This does not include copyright, since Red Hat does not have exclusive claims to copyright on the Debezium code.

    Is Red Hat reducing the funding for Debezium development?

No, not at all.

    How is Red Hat’s involvement in Debezium changing?

    Red Hat’s involvement in Debezium remains unchanged and as committed as ever.

    The Debezium team is expecting increased collaboration between Red Hat and other organizations, which may require setting time aside to onboard new contributors and collaborators.

    What is changing in practice for contributors to the Debezium project?

    Commonhaus requires Debezium to adhere to its policies, including in particular its intellectual property policy, and trademark policy.

    This is great news for you all, as it legally binds the project to remain open-source forever. Commonhaus also requires having a code of conduct, and provides a recommended one, but each project can freely customize the details as needed.

    Debezium contributors can opt to become Commonhaus Members; this is not a requirement to participate in the Debezium project, but is a requirement to help define Commonhaus.

    The Debezium project’s core contributors, internal governance and processes remain the same.

    \ No newline at end of file diff --git a/index.html b/index.html index 369f2a8be6..b28d1f3680 100644 --- a/index.html +++ b/index.html @@ -1 +1 @@ - Debezium

    Debezium

    Stream changes from your database.

    Debezium is an open source distributed platform for change data capture. Start it up, point it at your databases, and your apps can start responding to all of the inserts, updates, and deletes that other apps commit to your databases. Debezium is durable and fast, so your apps can respond quickly and never miss an event, even when things go wrong.

    Latest stable (3.0)
    Try our tutorial






    Do more with your data

    Your data is always changing. Debezium lets your apps react every time your data changes, and you don't have to change your apps that modify the data. Debezium continuously monitors your databases and lets any of your applications stream every row-level change in the same order they were committed to the database. Use the event streams to purge a cache, update search indexes, generate derived views and data, keep other data sources in sync, and much more. In fact, pull that functionality out of your app and into separate services.




    Simplify your apps

Since Debezium can monitor your data, why have one app update the database and update search indexes and send notifications and publish messages? Doing that correctly - especially when things go wrong - is really tough, and if you get it wrong the data in those systems may become inconsistent. Keep things simple, and move that extra functionality into separate services that use Debezium.








    Never miss a beat

    Take your apps and services down for maintenance, and Debezium keeps monitoring so that when your apps come back up they'll continue exactly where they left off. No matter what, Debezium keeps the events in the same order they were made to the database. And Debezium makes sure that you always see every event, even when things go wrong.




    React quickly

    When all things are running smoothly, Debezium is fast. And that means your apps and services can react quickly. Debezium is built on top of Apache Kafka, which is proven, scalable, and handles very large volumes of data very quickly.







    \ No newline at end of file + Debezium

    Debezium

    Stream changes from your database.

    Debezium is an open source distributed platform for change data capture. Start it up, point it at your databases, and your apps can start responding to all of the inserts, updates, and deletes that other apps commit to your databases. Debezium is durable and fast, so your apps can respond quickly and never miss an event, even when things go wrong.

    Latest stable (3.0)
    Try our tutorial






    Do more with your data

    Your data is always changing. Debezium lets your apps react every time your data changes, and you don't have to change your apps that modify the data. Debezium continuously monitors your databases and lets any of your applications stream every row-level change in the same order they were committed to the database. Use the event streams to purge a cache, update search indexes, generate derived views and data, keep other data sources in sync, and much more. In fact, pull that functionality out of your app and into separate services.




    Simplify your apps

Since Debezium can monitor your data, why have one app update the database and update search indexes and send notifications and publish messages? Doing that correctly - especially when things go wrong - is really tough, and if you get it wrong the data in those systems may become inconsistent. Keep things simple, and move that extra functionality into separate services that use Debezium.








    Never miss a beat

    Take your apps and services down for maintenance, and Debezium keeps monitoring so that when your apps come back up they'll continue exactly where they left off. No matter what, Debezium keeps the events in the same order they were made to the database. And Debezium makes sure that you always see every event, even when things go wrong.




    React quickly

    When all things are running smoothly, Debezium is fast. And that means your apps and services can react quickly. Debezium is built on top of Apache Kafka, which is proven, scalable, and handles very large volumes of data very quickly.







    \ No newline at end of file diff --git a/license/index.html b/license/index.html index 51eb91faa1..f71b108eb9 100644 --- a/license/index.html +++ b/license/index.html @@ -1 +1 @@ - Debezium License

    Debezium License

    All code of the Debezium project is licensed under the Apache License 2.0. The website is licensed under Creative Commons Attribution-ShareAlike 4.0.

    The Debezium software is copyright 2021 by Red Hat, Inc. The Debezium name, logo and other marks are trademarks of Red Hat, Inc. and may not be used without prior written permission.

    \ No newline at end of file + Debezium License

    Debezium License

    All code of the Debezium project is licensed under the Apache License 2.0. The website is licensed under Creative Commons Attribution-ShareAlike 4.0.

    The Debezium software is copyright 2021 by Red Hat, Inc. The Debezium name, logo and other marks are trademarks of Red Hat, Inc. and may not be used without prior written permission.

    \ No newline at end of file diff --git a/releases/0.10/index.html b/releases/0.10/index.html index a35a88b569..a28596f1ac 100644 --- a/releases/0.10/index.html +++ b/releases/0.10/index.html @@ -1 +1 @@ - Debezium Release Series 0.10

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.13
    Driver: 8.0.16
    MongoDB Database: 3.2, 3.4, 4.0
    Driver: 3.10.1
    PostgreSQL Database: 9.6, 10, 11
    Driver: 42.2.8
    Oracle Database: 11g, 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file + Debezium Release Series 0.10

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.13
    Driver: 8.0.16
    MongoDB Database: 3.2, 3.4, 4.0
    Driver: 3.10.1
    PostgreSQL Database: 9.6, 10, 11
    Driver: 42.2.8
    Oracle Database: 11g, 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file diff --git a/releases/0.10/release-notes.html b/releases/0.10/release-notes.html index 496bd456af..620a121b82 100644 --- a/releases/0.10/release-notes.html +++ b/releases/0.10/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 0.10

    Release Notes for Debezium 0.10

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 0.10.0.Final (October 2nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Final from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Debezium Postgres replication with pgoutput plugin sending events slowly for non-batched insertions DBZ-1515

    • ExtractNewRecordState access operation field before checking message format DBZ-1517

    Other changes

    This release includes also other changes:

    • Go back to original PG 10 container image for testing DBZ-1504

    • Support delete propagation in end-to-end demo DBZ-1506

    • Update Unwrap/UnwrapMongoDB SMT demos to use latest Debezium and delete event support DBZ-1516

    Release 0.10.0.CR2 (September 26th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.CR2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

The data type MicroDuration for representing INTERVAL columns (as supported by the Postgres and Oracle connectors) has been changed to use int64 rather than float64, as no fractional microsecond values are expected. For cases where the microseconds of an interval would overflow int64, an alternative String-based mapping will be provided in a future Debezium release, which will allow interval values to be represented exactly based on their year, month, day, etc. parts (see DBZ-1498).
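
For consumers, the practical effect is that an INTERVAL value now arrives as a whole number of microseconds in an int64 field. A small sketch of turning such a value back into a java.time.Duration; the field value below is made up for illustration.

import java.time.Duration;
import java.time.temporal.ChronoUnit;

public class MicroDurationSketch {
    public static void main(String[] args) {
        // An INTERVAL emitted by the connector: int64 microseconds with no fractional part.
        long intervalMicros = 90_061_000_000L; // made-up value: 1 day, 1 hour, 1 minute, 1 second
        Duration duration = Duration.of(intervalMicros, ChronoUnit.MICROS);
        System.out.println(duration); // PT25H1M1S
    }
}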

    The behavior of unchanged TOASTed columns has changed in this release (see DBZ-1367). Please upgrade the PostgreSQL connector in conjunction with the Decoderbufs plugin to guarantee that these columns are handled correctly. Please refer to the PostgreSQL connector documentation for more information on unchanged TOASTed columns.

    New Features

    • Allow user to customize key for DB tables through configuration DBZ-1015

    • Replace Custom Schema with Pluggable Serializers via KC Schema in Cassandra Connector DBZ-1405

    • Porting insert fields from source struct feature to ExtractNewDocumentState SMT DBZ-1442

    • Add column_id column to metadata section in messages in Kafka topic DBZ-1483

    Fixes

    This release includes the following fixes:

    • Cannot use Avro for fields with dash in name DBZ-1044

    • Detection of unsupported include-unchanged-toast parameter is failing DBZ-1399

    • Possible issue with Debezium not properly shutting down PG connections during Connect rebalance DBZ-1426

    • Common error when PG connector cannot connect is confusing DBZ-1427

    • Postgres connector does not honor publication.name configuration DBZ-1436

    • Wrong interrupt handling DBZ-1438

    • CREATE DATABASE and TABLE statements do not support DEFAULT charset DBZ-1470

    • Avoid NPE at runtime in EventRouter when incorrect configuration is given. DBZ-1495

    • java.time.format.DateTimeParseException: java.time.format.DateTimeParseException DBZ-1501

    Other changes

    This release includes also other changes:

    • Publish container images to quay.io DBZ-1178

    • Document installation of DecoderBufs plug-in via RPM on Fedora DBZ-1286

    • Fix intermittendly failing Postgres tests DBZ-1383

    • Add MongoDB 4.2 to testing matrix DBZ-1389

    • Upgrade to latest Postgres driver DBZ-1462

    • Use old SMT name in 0.9 docs DBZ-1471

    • Speak of "primary" and "secondary" nodes in the Postgres docs DBZ-1472

    • PostgreSQL snapshot.mode connector option description should include 'exported' DBZ-1473

    • Update example tutorial to show using Avro configuration at connector level DBZ-1474

    • Upgrade protobuf to version 3.8.0 DBZ-1475

    • Logging can be confusing when using fallback replication stream methods DBZ-1479

    • Remove info on when an option was introduced from the docs DBZ-1493

    • Unstable Mysql connector Integration test (shouldProcessCreateUniqueIndex) DBZ-1500

    • Update PostgreSQL documentation DBZ-1503

    • DocumentTest#shouldCreateArrayFromValues() fails on Windows DBZ-1508

    Release 0.10.0.CR1 (September 10th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.CR1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

The ProtoBuf library used by the PostgreSQL plugin has been upgraded.

The SQL Server connector now supports Kafka Connect’s temporal datatypes. At the same time, the default temporal mode is no longer adaptive_time_microseconds but adaptive; the adaptive_time_microseconds mode is no longer supported.

    New Features

    • Replace YAML Dependency with Property File in Cassandra Connector DBZ-1406

    • Support Connect date/time precision DBZ-1419

    • Exported snapshots are supported by PostgreSQL 9.4+ DBZ-1440

    • Enhance Postgresql & Mysql Docker example images with some Spatial geometry DBZ-1459

    Fixes

    This release includes the following fixes:

    • Date conversion broken if date more than 3000 year DBZ-949

    • Overflowed Timestamp in Postgres Connection DBZ-1205

    • Debezium does not expect a year larger than 9999 DBZ-1255

    • ExportedSnapshotter and InitialOnlySnapshotter should not always execute a snapshot. DBZ-1437

    • Source Fields Not Present on Delete Rewrite DBZ-1448

    • NPE raises when a new connector has nothing to commit DBZ-1457

    • MongoDB connector throws NPE on "op=n" DBZ-1464

    Other changes

    This release includes also other changes:

    • Upgrade ProtoBuf dependency DBZ-1390

    • Engine does not stop on Exception DBZ-1431

    • Create "architecture" and "feature" pages DBZ-1458

    Release 0.10.0.Beta4 (August 16th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta4 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta4 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta4 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

The default format of the message values produced by the outbox event router has been changed: by default, the value now solely contains the contents of the payload column. In order to add the eventType value that previously was part of the message value, use the "additional field" configuration option with a placement of envelope. In this case, the message value will be a complex structure containing the payload key and one additional key for each further field.

    New Features

    • Implement a CDC connector for Apache Cassandra DBZ-607

    • Support "Exported Snapshots" feature for taking lockless snapshots with Postgres DBZ-1035

    • Snapshot Order of tables DBZ-1254

    • Add ability to insert fields from source struct in ExtractNewRecordState SMT DBZ-1395

    Fixes

    This release includes the following fixes:

    • Debezium for MySQL fails on GRANT DELETE ON <table> DBZ-1411

    • Debezium for MySQL tries to flush a table for a database not in the database whitelist DBZ-1414

    • Table scan is performed anyway even if snapshot.mode is set to initial_schema_only DBZ-1417

    • SMT ExtractNewDocumentState does not support Heartbeat events DBZ-1430

    • Postgres connector does not honor publication.name configuration DBZ-1436

    Other changes

    This release includes also other changes:

    • Issue with debezium embedded documentation DBZ-393

    • Refactor Postgres connector to be based on new framework classes DBZ-777

    • Don’t obtain new connection each time when getting xmin position DBZ-1381

    • Unify handling of attributes in EventRouter SMT DBZ-1385

    • DockerHub: show container specific README files DBZ-1387

    • Remove unused dependencies from Cassandra connector DBZ-1424

    • Simplify custom engine name parsing grammar DBZ-1432

    Release 0.10.0.Beta3 (July 23rd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta3 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta3 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

The value of heartbeat messages has been changed; it now contains a field with the timestamp of the heartbeat. Note that the message format of heartbeat messages is considered an implementation detail of Debezium, i.e. its format may be altered incompatibly and consumers should not rely on any specific format.

    New Features

    • Handle tables without primary keys DBZ-916

    • Define exposed connector metrics in MySQL DBZ-1120

    • Set heartbeat interval for the binlog reader DBZ-1338

    • Outbox router should skip heartbeat messages by default DBZ-1388

    • Introduce number ofEventsInError metric DBZ-1222

    • Add option to skip table locks when snapshotting DBZ-1238

    • Explore built-in logical decoding added in Postgres 10 DBZ-766

    • Support deletion events in the outbox routing SMT DBZ-1320

    • Expose metric for progress of DB history recovery DBZ-1356

    Fixes

    This release includes the following fixes:

    • Incorrect offset may be committed despite unparseable DDL statements DBZ-599

    • SavePoints are getting stored in history topic DBZ-794

    • delete message "op:d" on tables with unique combination of 2 primary keys = (composite keys) , the d records are not sent DBZ-1180

    • When a MongoDB collection haven’t had activity for a period of time an initial sync is triggered DBZ-1198

    • Restore compatibility with Kafka 1.x DBZ-1361

    • no viable alternative at input 'LOCK DEFAULT' DBZ-1376

    • NullPointer Exception on getReplicationSlotInfo for Postgres DBZ-1380

    • CHARSET is not supported for CAST function DBZ-1397

    • Aria engine is not known by Debezium parser DBZ-1398

    • Debezium does not get the first change after creating the replication slot in PostgreSQL DBZ-1400

    • Built-in database filter throws NPE DBZ-1409

    • Error processing RDS heartbeats DBZ-1410

    • PostgreSQL Connector generates false alarm for empty password DBZ-1379

    Other changes

    This release includes also other changes:

    • Developer Preview Documentation DBZ-1284

    • Upgrade to Apache Kafka 2.3 DBZ-1358

    • Stabilize test executions on CI DBZ-1362

    • Handling tombstone emission option consistently DBZ-1365

    • Avoid creating unnecessary type metadata instances; only init once per column. DBZ-1366

    • Fix tests to run more reliably on Amazon RDS DBZ-1371

    Release 0.10.0.Beta2 (June 27th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Protect against invalid configuration DBZ-1340

    • Make emission of tombstone events configurable DBZ-835

    • Support HSTORE array types DBZ-1337

    Fixes

    This release includes the following fixes:

    • Events for TRUNCATE TABLE not being emitted DBZ-708

    • Connector consumes huge amount of memory DBZ-1065

    • Exception when starting the connector on Kafka Broker 0.10.1.0 DBZ-1270

    • Raise warning when renaming table causes it to be captured or not captured any longer DBZ-1278

    • no viable alternative at input 'ALTER TABLE documents RENAME INDEX' DBZ-1329

    • MySQL DDL parser - issue with triggers and NEW DBZ-1331

    • MySQL DDL parser - issue with COLLATE in functions DBZ-1332

    • Setting "include.unknown.datatypes" to true works for streaming but not during snapshot DBZ-1335

    • PostgreSQL db with materialized view failing during snapshot DBZ-1345

    • Switch RecordsStreamProducer to use non-blocking stream call DBZ-1347

    • Can’t parse create definition on the mysql connector DBZ-1348

    • String literal should support utf8mb3 charset DBZ-1349

    • NO_AUTO_CREATE_USER sql mode is not supported in MySQL 8 DBZ-1350

    • Incorrect assert for invalid timestamp check in MySQL 8 DBZ-1353

    Other changes

    This release includes also other changes:

    • Add to FAQ what to do on offset flush timeout DBZ-799

    • Update MongoDB driver to 3.10.1 DBZ-1333

    • Fix test for partitioned table snapshot DBZ-1342

    • Enable PostGIS for Alpine 9.6 DBZ-1351

    • Fix description for state of Snapshot DBZ-1346

    • Remove unused code for alternative topic selection strategy DBZ-1352

    Release 0.10.0.Beta1 (June 11th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.1 and has been tested with version 2.2.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Issue a warning for filters not matching any table/database DBZ-1242

    Fixes

    This release includes the following fixes:

    • Multiple cdc entries with exactly the same commitLsn and changeLsn DBZ-1152

    • PostGIS does not work in Alpine images DBZ-1307

    • Processing MongoDB document contains UNDEFINED type causes exception with MongoDB Unwrap SMT DBZ-1315

    • Partial zero date datetime/timestamp will fail snapshot DBZ-1318

    • Default value set null when modify a column from nullable to not null DBZ-1321

    • Out-of-order chunks don’t initiate commitTime DBZ-1323

    • NullPointerException when receiving noop event DBZ-1317

    Other changes

    This release includes also other changes:

    • Describe structure of SQL Server CDC events DBZ-1296

    • Upgrade to Apache Kafka 2.2.1 DBZ-1316

    Release 0.10.0.Alpha2 (June 3rd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Alpha2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The snapshot marking has been overhauled DBZ-1295. Originally the snapshot marker has been field with boolean value indicating whther the record was obtained via snapshot or not. Now it has been turned into three state string enumeration indicating the record came from snapshot (true), is last in the snapshot (last) or is from streaming (false).

    New Features

    • "source" block for MySQL schema change events should contain db and table names DBZ-871

    • Adhere to Dockerfile good practices DBZ-1279

    Fixes

    This release includes the following fixes:

    • DDL that contains user are unparsable by antlr DBZ-1300

    • Only validate history topic name for affected connectors DBZ-1283

    Other changes

    This release includes also other changes:

    • Replace Predicate<Column> with ColumnNameFilter DBZ-1092

    • Upgrade ZooKeeper to 3.4.14 DBZ-1298

    • Upgrade Docker tooling image DBZ-1301

    • Upgrade Debezium Postgres Example image to 11 DBZ-1302

    • Create profile to build assemblies without drivers DBZ-1303

    • Modify release pipeline to use new Dockerfiles DBZ-1304

    • Add 3rd party licences DBZ-1306

    • Remove unused methods from ReplicationStream DBZ-1310

    Release 0.10.0.Alpha1 (May 28th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Alpha1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    All connectors now share the common source info block fields DBZ-596. This led to the renaming and/or change of content of some of the source fields. We are providing an option source.struct.version=v1 to use legacy source info block.

    Unwrap SMTs have been renamed DBZ-677 to better express their use.

    MySQL connector now consistently handle database.history.store.only.monitored.tables.ddl for both snapshot and streaming mode DBZ-683. This leads to changes in the contents of database history topic.

    MySQL legacy DDL parser has been removed DBZ-736 and was fully replaced with ANTLR-based parser.

    Oracle and SQL Server connectors now contain database, schema, and table names in the source info block DBZ-875.

    MongoDB now contains both database and collection name in source info block DBZ-1175. The original ns field has been dropped.

    Metric NumberOfEventsSkipped is now available only for MySQL connector DBZ-1209.

    All deprecated features and configuration options DBZ-1234 have been removed from the codebase and are no longer available.

    Outbox routing SMT option names have been renamed to follow a consistent naming schema DBZ-1289.

    New Features

    • Excessive warnings in log about column missing charset DBZ-844

    • Update JDBC (and Mongo) drivers to latest versions DBZ-1273

    • Support snapshot SELECT overrides for SQL Server connector DBZ-1224

    • Generate warning in logs if change table list is empty DBZ-1281

    Fixes

    This release includes the following fixes:

    • MySQL connection with client authentication does not work DBZ-1228

    • Unhandled exception prevents snapshot.mode : when_needed functioning DBZ-1244

    • MySQL connector stops working with a NullPointerException error DBZ-1246

    • CREATE INDEX can fail for non-monitored tables after connector restart DBZ-1264

    • Create a spec file for RPM for postgres protobuf plugin DBZ-1272

    • Last transaction events get duplicated on EmbeddedEngine MySQL connector restart DBZ-1276

    Other changes

    This release includes also other changes:

    • Misleading description for column.mask.with.length.chars parameter DBZ-1290

    • Clean up integration tests under integration-tests DBZ-263

    • Consolidate DDL parser tests DBZ-733

    • Document "database.ssl.mode" option DBZ-985

    • Synchronize MySQL grammar with upstream grammar DBZ-1127

    • Add FAQ entry about -XX:+UseStringDeduplication JVM flag DBZ-1139

    • Test and handle time 24:00:00 supported by PostgreSQL DBZ-1164

    • Define final record format for MySQL, Postgres, SQL Server and MongoDB DBZ-1235

    • Improve error reporting in case of misaligned schema and data DBZ-1257

    • Adding missing contributors to COPYRIGHT.txt DBZ-1259

    • Automate contributor check during release pipeline. DBZ-1282

\ No newline at end of file
+ Release Notes for Debezium 0.10

    Release Notes for Debezium 0.10

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 0.10.0.Final (October 2nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Final from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.
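
    For a Kafka Connect cluster, this stop/replace/restart cycle can be driven through the Connect REST API. The following is only a rough sketch: the connector name, plugin directory, and registration file are hypothetical placeholders, so adapt them to your own deployment.

    # Remove the running connector; its offsets stay in Kafka's offsets topic
    curl -i -X DELETE http://localhost:8083/connectors/inventory-connector

    # On every Connect worker: replace the old plugin files with the 0.10.0.Final ones
    # (the path below is just an example plugin directory) and restart the worker
    rm -rf /kafka/connect/debezium-connector-mysql
    tar xzf debezium-connector-mysql-0.10.0.Final-plugin.tar.gz -C /kafka/connect

    # Re-register the connector with the same configuration as before
    curl -i -X POST -H "Content-Type: application/json" \
         --data @register-mysql.json http://localhost:8083/connectors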

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Debezium Postgres replication with pgoutput plugin sending events slowly for non-batched insertions DBZ-1515

    • ExtractNewRecordState access operation field before checking message format DBZ-1517

    Other changes

This release also includes other changes:

    • Go back to original PG 10 container image for testing DBZ-1504

    • Support delete propagation in end-to-end demo DBZ-1506

    • Update Unwrap/UnwrapMongoDB SMT demos to use latest Debezium and delete event support DBZ-1516

    Release 0.10.0.CR2 (September 26th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.CR2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The data type MicroDuration, used for representing INTERVAL columns (as supported by the Postgres and Oracle connectors), has been changed to use int64 rather than float64, as no fractional microsecond values are expected. For cases where the microseconds of an interval would overflow int64, an alternative String-based mapping will be provided in a future Debezium release, which will allow interval values to be represented exactly based on their year, month, day, etc. parts (see DBZ-1498).
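
    As a rough illustration (the column name below is made up for this example), a PostgreSQL INTERVAL of '1 day 02:00:00' is 26 hours, i.e. 93,600 seconds, and would now appear in the change event as the int64 value 93600000000 microseconds rather than a float64:

    {
      "after": {
        "id": 42,
        "vacation_length": 93600000000
      }
    }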

    The behavior of unchanged TOASTed columns has changed in this release (see DBZ-1367). Please upgrade the PostgreSQL connector in conjunction with the Decoderbufs plugin to guarantee that these columns are handled correctly. Please refer to the PostgreSQL connector documentation for more information on unchanged TOASTed columns.

    New Features

    • Allow user to customize key for DB tables through configuration DBZ-1015

    • Replace Custom Schema with Pluggable Serializers via KC Schema in Cassandra Connector DBZ-1405

    • Porting insert fields from source struct feature to ExtractNewDocumentState SMT DBZ-1442

    • Add column_id column to metadata section in messages in Kafka topic DBZ-1483

    Fixes

    This release includes the following fixes:

    • Cannot use Avro for fields with dash in name DBZ-1044

    • Detection of unsupported include-unchanged-toast parameter is failing DBZ-1399

    • Possible issue with Debezium not properly shutting down PG connections during Connect rebalance DBZ-1426

    • Common error when PG connector cannot connect is confusing DBZ-1427

    • Postgres connector does not honor publication.name configuration DBZ-1436

    • Wrong interrupt handling DBZ-1438

    • CREATE DATABASE and TABLE statements do not support DEFAULT charset DBZ-1470

    • Avoid NPE at runtime in EventRouter when incorrect configuration is given. DBZ-1495

    • java.time.format.DateTimeParseException: java.time.format.DateTimeParseException DBZ-1501

    Other changes

This release also includes other changes:

    • Publish container images to quay.io DBZ-1178

    • Document installation of DecoderBufs plug-in via RPM on Fedora DBZ-1286

• Fix intermittently failing Postgres tests DBZ-1383

    • Add MongoDB 4.2 to testing matrix DBZ-1389

    • Upgrade to latest Postgres driver DBZ-1462

    • Use old SMT name in 0.9 docs DBZ-1471

    • Speak of "primary" and "secondary" nodes in the Postgres docs DBZ-1472

    • PostgreSQL snapshot.mode connector option description should include 'exported' DBZ-1473

    • Update example tutorial to show using Avro configuration at connector level DBZ-1474

    • Upgrade protobuf to version 3.8.0 DBZ-1475

    • Logging can be confusing when using fallback replication stream methods DBZ-1479

    • Remove info on when an option was introduced from the docs DBZ-1493

    • Unstable Mysql connector Integration test (shouldProcessCreateUniqueIndex) DBZ-1500

    • Update PostgreSQL documentation DBZ-1503

    • DocumentTest#shouldCreateArrayFromValues() fails on Windows DBZ-1508

    Release 0.10.0.CR1 (September 10th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.CR1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The ProtoBuf library used by the PostgreSQL plugin has been upgraded.

The SQL Server connector now supports Kafka Connect’s temporal datatypes. At the same time, the default temporal precision mode is no longer adaptive_time_microseconds but adaptive; the adaptive_time_microseconds mode is no longer supported.
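
    If you need to pin the behaviour explicitly, the temporal precision is controlled through the connector's time.precision.mode option. The fragment below is a minimal sketch only; all other (required) connection options are omitted:

    # fragment of a SQL Server connector configuration (other required options omitted)
    connector.class=io.debezium.connector.sqlserver.SqlServerConnector
    # "adaptive" is now the default; adaptive_time_microseconds is no longer accepted
    time.precision.mode=adaptive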

    New Features

    • Replace YAML Dependency with Property File in Cassandra Connector DBZ-1406

    • Support Connect date/time precision DBZ-1419

    • Exported snapshots are supported by PostgreSQL 9.4+ DBZ-1440

    • Enhance Postgresql & Mysql Docker example images with some Spatial geometry DBZ-1459

    Fixes

    This release includes the following fixes:

    • Date conversion broken if date more than 3000 year DBZ-949

    • Overflowed Timestamp in Postgres Connection DBZ-1205

    • Debezium does not expect a year larger than 9999 DBZ-1255

    • ExportedSnapshotter and InitialOnlySnapshotter should not always execute a snapshot. DBZ-1437

    • Source Fields Not Present on Delete Rewrite DBZ-1448

    • NPE raises when a new connector has nothing to commit DBZ-1457

    • MongoDB connector throws NPE on "op=n" DBZ-1464

    Other changes

This release also includes other changes:

    • Upgrade ProtoBuf dependency DBZ-1390

    • Engine does not stop on Exception DBZ-1431

    • Create "architecture" and "feature" pages DBZ-1458

    Release 0.10.0.Beta4 (August 16th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta4 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta4 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta4 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The default format of the message values produced by the outbox event router has been changed: by default, the message value now solely contains the value of the payload column. To add the eventType value that was previously part of the message value, use the "additional field" configuration option with a placement of envelope. In that case, the message value will be a complex structure containing the payload key and one additional key for each further field.
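
    A configuration sketch with one additional field placed into the envelope could look like the fragment below. The option name and the column/field names follow the later outbox router documentation and are shown here only as an assumption, so verify them against the 0.10 SMT documentation:

    # fragment of a connector configuration applying the outbox event router SMT
    transforms=outbox
    transforms.outbox.type=io.debezium.transforms.outbox.EventRouter
    # copy the value of the hypothetical "type" column into the envelope as "eventType"
    # (option and field names are illustrative assumptions)
    transforms.outbox.table.fields.additional.placement=type:envelope:eventType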

    New Features

    • Implement a CDC connector for Apache Cassandra DBZ-607

    • Support "Exported Snapshots" feature for taking lockless snapshots with Postgres DBZ-1035

    • Snapshot Order of tables DBZ-1254

    • Add ability to insert fields from source struct in ExtractNewRecordState SMT DBZ-1395

    Fixes

    This release includes the following fixes:

    • Debezium for MySQL fails on GRANT DELETE ON <table> DBZ-1411

    • Debezium for MySQL tries to flush a table for a database not in the database whitelist DBZ-1414

    • Table scan is performed anyway even if snapshot.mode is set to initial_schema_only DBZ-1417

    • SMT ExtractNewDocumentState does not support Heartbeat events DBZ-1430

    • Postgres connector does not honor publication.name configuration DBZ-1436

    Other changes

This release also includes other changes:

    • Issue with debezium embedded documentation DBZ-393

    • Refactor Postgres connector to be based on new framework classes DBZ-777

    • Don’t obtain new connection each time when getting xmin position DBZ-1381

    • Unify handling of attributes in EventRouter SMT DBZ-1385

    • DockerHub: show container specific README files DBZ-1387

    • Remove unused dependencies from Cassandra connector DBZ-1424

    • Simplify custom engine name parsing grammar DBZ-1432

    Release 0.10.0.Beta3 (July 23rd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta3 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta3 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The value of heartbeat messages has been changed; it now contains a field with the timestamp of the heartbeat. Note that the message format of heartbeat messages is considered an implementation detail of Debezium, i.e. its format may be altered incompatibly and consumers should not rely on any specific format.
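
    For illustration only, a heartbeat record value now looks roughly like the sketch below; as stated above, the exact field names and layout are an implementation detail and may change, so treat this as an assumption rather than a contract:

    {
      "ts_ms": 1563867600000
    }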

    New Features

    • Handle tables without primary keys DBZ-916

    • Define exposed connector metrics in MySQL DBZ-1120

    • Set heartbeat interval for the binlog reader DBZ-1338

    • Outbox router should skip heartbeat messages by default DBZ-1388

• Introduce numberOfEventsInError metric DBZ-1222

    • Add option to skip table locks when snapshotting DBZ-1238

    • Explore built-in logical decoding added in Postgres 10 DBZ-766

    • Support deletion events in the outbox routing SMT DBZ-1320

    • Expose metric for progress of DB history recovery DBZ-1356

    Fixes

    This release includes the following fixes:

    • Incorrect offset may be committed despite unparseable DDL statements DBZ-599

    • SavePoints are getting stored in history topic DBZ-794

• Delete messages ("op": "d") on tables with a unique combination of two primary key columns (composite keys) are not sent DBZ-1180

• When a MongoDB collection hasn’t had activity for a period of time, an initial sync is triggered DBZ-1198

    • Restore compatibility with Kafka 1.x DBZ-1361

    • no viable alternative at input 'LOCK DEFAULT' DBZ-1376

    • NullPointer Exception on getReplicationSlotInfo for Postgres DBZ-1380

    • CHARSET is not supported for CAST function DBZ-1397

    • Aria engine is not known by Debezium parser DBZ-1398

    • Debezium does not get the first change after creating the replication slot in PostgreSQL DBZ-1400

    • Built-in database filter throws NPE DBZ-1409

    • Error processing RDS heartbeats DBZ-1410

    • PostgreSQL Connector generates false alarm for empty password DBZ-1379

    Other changes

This release also includes other changes:

    • Developer Preview Documentation DBZ-1284

    • Upgrade to Apache Kafka 2.3 DBZ-1358

    • Stabilize test executions on CI DBZ-1362

    • Handling tombstone emission option consistently DBZ-1365

    • Avoid creating unnecessary type metadata instances; only init once per column. DBZ-1366

    • Fix tests to run more reliably on Amazon RDS DBZ-1371

    Release 0.10.0.Beta2 (June 27th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Protect against invalid configuration DBZ-1340

    • Make emission of tombstone events configurable DBZ-835

    • Support HSTORE array types DBZ-1337

    Fixes

    This release includes the following fixes:

    • Events for TRUNCATE TABLE not being emitted DBZ-708

    • Connector consumes huge amount of memory DBZ-1065

    • Exception when starting the connector on Kafka Broker 0.10.1.0 DBZ-1270

    • Raise warning when renaming table causes it to be captured or not captured any longer DBZ-1278

    • no viable alternative at input 'ALTER TABLE documents RENAME INDEX' DBZ-1329

    • MySQL DDL parser - issue with triggers and NEW DBZ-1331

    • MySQL DDL parser - issue with COLLATE in functions DBZ-1332

    • Setting "include.unknown.datatypes" to true works for streaming but not during snapshot DBZ-1335

    • PostgreSQL db with materialized view failing during snapshot DBZ-1345

    • Switch RecordsStreamProducer to use non-blocking stream call DBZ-1347

    • Can’t parse create definition on the mysql connector DBZ-1348

    • String literal should support utf8mb3 charset DBZ-1349

    • NO_AUTO_CREATE_USER sql mode is not supported in MySQL 8 DBZ-1350

    • Incorrect assert for invalid timestamp check in MySQL 8 DBZ-1353

    Other changes

This release also includes other changes:

    • Add to FAQ what to do on offset flush timeout DBZ-799

    • Update MongoDB driver to 3.10.1 DBZ-1333

    • Fix test for partitioned table snapshot DBZ-1342

    • Enable PostGIS for Alpine 9.6 DBZ-1351

    • Fix description for state of Snapshot DBZ-1346

    • Remove unused code for alternative topic selection strategy DBZ-1352

    Release 0.10.0.Beta1 (June 11th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.1 and has been tested with version 2.2.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Beta1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Issue a warning for filters not matching any table/database DBZ-1242

    Fixes

    This release includes the following fixes:

    • Multiple cdc entries with exactly the same commitLsn and changeLsn DBZ-1152

    • PostGIS does not work in Alpine images DBZ-1307

    • Processing MongoDB document contains UNDEFINED type causes exception with MongoDB Unwrap SMT DBZ-1315

    • Partial zero date datetime/timestamp will fail snapshot DBZ-1318

    • Default value set null when modify a column from nullable to not null DBZ-1321

    • Out-of-order chunks don’t initiate commitTime DBZ-1323

    • NullPointerException when receiving noop event DBZ-1317

    Other changes

This release also includes other changes:

    • Describe structure of SQL Server CDC events DBZ-1296

    • Upgrade to Apache Kafka 2.2.1 DBZ-1316

    Release 0.10.0.Alpha2 (June 3rd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Alpha2 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The snapshot marking has been overhauled DBZ-1295. Originally, the snapshot marker was a field with a boolean value indicating whether the record was obtained via snapshot or not. It has now been turned into a three-state string enumeration indicating that the record came from the snapshot (true), is the last record in the snapshot (last), or comes from streaming (false).
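
    As an illustration, the source block of a change event now carries the marker as a string value, for example (all other payload and source fields are omitted from this sketch):

    {
      "source": {
        "connector": "mysql",
        "snapshot": "last"
      }
    }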

    New Features

    • "source" block for MySQL schema change events should contain db and table names DBZ-871

    • Adhere to Dockerfile good practices DBZ-1279

    Fixes

    This release includes the following fixes:

• DDL that contains USER is unparsable by Antlr DBZ-1300

    • Only validate history topic name for affected connectors DBZ-1283

    Other changes

This release also includes other changes:

    • Replace Predicate<Column> with ColumnNameFilter DBZ-1092

    • Upgrade ZooKeeper to 3.4.14 DBZ-1298

    • Upgrade Docker tooling image DBZ-1301

    • Upgrade Debezium Postgres Example image to 11 DBZ-1302

    • Create profile to build assemblies without drivers DBZ-1303

    • Modify release pipeline to use new Dockerfiles DBZ-1304

    • Add 3rd party licences DBZ-1306

    • Remove unused methods from ReplicationStream DBZ-1310

    Release 0.10.0.Alpha1 (May 28th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.10.0.Alpha1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.10.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.10.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

All connectors now share the common source info block fields DBZ-596. This led to the renaming of some of the source fields and/or changes to their content. We provide the option source.struct.version=v1 to keep using the legacy source info block.
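
    A minimal sketch of opting back into the legacy layout (all other connector options omitted):

    # fragment of a connector configuration: keep emitting the pre-0.10 source block layout
    source.struct.version=v1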

    Unwrap SMTs have been renamed DBZ-677 to better express their use.

The MySQL connector now consistently handles database.history.store.only.monitored.tables.ddl for both snapshot and streaming modes DBZ-683. This leads to changes in the contents of the database history topic.

The legacy MySQL DDL parser has been removed DBZ-736 and fully replaced with the ANTLR-based parser.

    Oracle and SQL Server connectors now contain database, schema, and table names in the source info block DBZ-875.

The MongoDB connector now includes both the database and collection name in the source info block DBZ-1175. The original ns field has been dropped.

    Metric NumberOfEventsSkipped is now available only for MySQL connector DBZ-1209.

    All deprecated features and configuration options DBZ-1234 have been removed from the codebase and are no longer available.

Outbox routing SMT option names have been renamed to follow a consistent naming scheme DBZ-1289.

    New Features

    • Excessive warnings in log about column missing charset DBZ-844

    • Update JDBC (and Mongo) drivers to latest versions DBZ-1273

    • Support snapshot SELECT overrides for SQL Server connector DBZ-1224

    • Generate warning in logs if change table list is empty DBZ-1281

    Fixes

    This release includes the following fixes:

    • MySQL connection with client authentication does not work DBZ-1228

    • Unhandled exception prevents snapshot.mode : when_needed functioning DBZ-1244

    • MySQL connector stops working with a NullPointerException error DBZ-1246

    • CREATE INDEX can fail for non-monitored tables after connector restart DBZ-1264

    • Create a spec file for RPM for postgres protobuf plugin DBZ-1272

    • Last transaction events get duplicated on EmbeddedEngine MySQL connector restart DBZ-1276

    Other changes

This release also includes other changes:

    • Misleading description for column.mask.with.length.chars parameter DBZ-1290

    • Clean up integration tests under integration-tests DBZ-263

    • Consolidate DDL parser tests DBZ-733

    • Document "database.ssl.mode" option DBZ-985

    • Synchronize MySQL grammar with upstream grammar DBZ-1127

    • Add FAQ entry about -XX:+UseStringDeduplication JVM flag DBZ-1139

    • Test and handle time 24:00:00 supported by PostgreSQL DBZ-1164

    • Define final record format for MySQL, Postgres, SQL Server and MongoDB DBZ-1235

    • Improve error reporting in case of misaligned schema and data DBZ-1257

    • Adding missing contributors to COPYRIGHT.txt DBZ-1259

    • Automate contributor check during release pipeline. DBZ-1282

\ No newline at end of file
diff --git a/releases/0.9/index.html b/releases/0.9/index.html
index c3cbbd3db0..2d1d94d041 100644
--- a/releases/0.9/index.html
+++ b/releases/0.9/index.html
@@ -1 +1 @@
- Debezium Release Series 0.9

\ No newline at end of file
+ Debezium Release Series 0.9

    stable

    Tested Versions

Java: 8+
Kafka Connect: 1.x, 2.x
MySQL: database 5.7, 8.0.13; driver 8.0.13
MongoDB: database 3.2, 3.4, 4.0; driver 3.9.0
PostgreSQL: database 9.6, 10, 11; driver 42.2.5
Oracle: database 11g, 12c; driver 12.2.0.1
SQL Server: database 2017; driver 6.4.0.jre8

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

\ No newline at end of file
diff --git a/releases/0.9/release-notes.html b/releases/0.9/release-notes.html
index 2691f8fa65..ebc9c62138 100644
--- a/releases/0.9/release-notes.html
+++ b/releases/0.9/release-notes.html
@@ -1 +1 @@
- Release Notes for Debezium 0.9

    Release Notes for Debezium 0.9

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 0.9.5.Final (May 2nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.5.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Upgrade to Kafka 2.2.0 DBZ-1227

    • Ability to specify batch size during snapshot DBZ-1247

    • Postgresql ARRAY support DBZ-1076

    • Add support macaddr and macaddr8 PostgreSQL column types DBZ-1193

    Fixes

    This release includes the following fixes:

    • Failing to specify value for database.server.name results in invalid Kafka topic name DBZ-212

    • Escape sequence handling needs to be unified DBZ-481

    • Postgres Connector times out in schema discovery for DBs with many tables DBZ-1214

    • Oracle connector: JDBC transaction can only capture single DML record DBZ-1223

    • Enable enumeration options to contain escaped characters or commas. DBZ-1226

    • Antlr parser fails on column named with MODE keyword DBZ-1233

    • Lost precision for timestamp with timezone DBZ-1236

    • NullpointerException due to optional value for commitTime DBZ-1241

    • Default value for datetime(0) is incorrectly handled DBZ-1243

    • Postgres connector failing because empty state data is being stored in offsets topic DBZ-1245

    • Default value for Bit does not work for larger values DBZ-1249

    • Microsecond precision is lost when reading timetz data from Postgres. DBZ-1260

    Other changes

This release also includes other changes:

    • Zookeeper image documentation does not describe txns mountpoint DBZ-1231

    • Parse enum and set options with Antlr DBZ-739

    Release 0.9.4.Final (April 11th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.4.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add MySQL Connector metric to expose "number of filtered events" DBZ-1206

    • Support TLS 1.2 for MySQL DBZ-1208

    • Create new MysqlConnector metric exposing if the connector is tracking offsets using GTIDs or not. DBZ-1221

    • Add support for columns of type INET DBZ-1189

    Fixes

    This release includes the following fixes:

    • Incorrect value for datetime field for '0001-01-01 00:00:00' DBZ-1143

• PostgreSQL DecoderBufs crash when working with geometries in "public" schema DBZ-1144

• [postgres] differing logic between snapshot and streams for create record DBZ-1163

    • Error while deserializing binlog event DBZ-1191

    • MySQL connector throw an exception when captured invalid datetime DBZ-1194

    • Error when alter Enum column with CHARACTER SET DBZ-1203

    • Mysql: Getting ERROR Failed due to error: connect.errors.ConnectException: For input string: "false" DBZ-1204

    • MySQL connection timeout after bootstrapping a new table DBZ-1207

    • SLF4J usage issues DBZ-1212

    • JDBC Connection Not Closed in MySQL Connector Snapshot Reader DBZ-1218

    • Support FLOAT(p) column definition style DBZ-1220

    Other changes

This release also includes other changes:

    • Add WhitespaceAfter check to Checkstyle DBZ-362

    • Document RDS Postgres wal_level behavior DBZ-1219

    Release 0.9.3.Final (March 25th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.3.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Support Outbox SMT as part of Debezium core DBZ-1169

    • Add support for partial recovery from lost slot in postgres DBZ-1082

    Fixes

    This release includes the following fixes:

• Postgresql Snapshot with a table that has > 8192 records hangs DBZ-1161

    • HStores fail to Snapshot properly DBZ-1162

    • NullPointerException When there are multiple tables in different schemas in the whitelist DBZ-1166

    • Cannot set offset.flush.interval.ms via docker entrypoint DBZ-1167

    • Missing Oracle OCI library is not reported as error DBZ-1170

    • RecordsStreamProducer forgets to convert commitTime from nanoseconds to microseconds DBZ-1174

    • MongoDB Connector doesn’t fail on invalid hosts configuration DBZ-1177

    • Handle NPE errors when trying to create history topic against confluent cloud DBZ-1179

    • The Postgres wal2json streaming and non-streaming decoders do not process empty events DBZ-1181

    • Can’t continue after snapshot is done DBZ-1184

    • ParsingException for SERIAL keyword DBZ-1185

    • STATS_SAMPLE_PAGES config cannot be parsed DBZ-1186

    • MySQL Connector generates false alarm for empty password DBZ-1188

    Other changes

This release also includes other changes:

    • Ensure no brace-less if() blocks are used in the code base DBZ-1039

    • Align Oracle DDL parser code to use the same structure as MySQL DBZ-1192

    Release 0.9.2.Final (February 22nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.2.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add snapshotting mode NEVER for MongoDB connector DBZ-867

    • Allow passing of arbitrary parameters when replication slot is started DBZ-1130

    Fixes

    This release includes the following fixes:

    • Integer default value for DECIMAL column fails with Avro Converter DBZ-1077

    • connect binds only to hostname interface DBZ-1108

    • Connector fails to connect to binlog on connectors rebalance, throws ServerException DBZ-1132

    • Fail to parse MySQL TIME with values bigger than 23:59:59.999999 DBZ-1137

    • Test dependencies shouldn’t be part of the SQL Server connector archive DBZ-1138

    • Emit correctly-typed fallback values for replica identity DEFAULT DBZ-1141

    • Unexpected exception while streaming changes from row with unchanged toast DBZ-1146

    • SQL syntax error near '"gtid_purged"' DBZ-1147

    • Postgres delete operations throwing DataException DBZ-1149

    • Antlr parser fails on column names that are keywords DBZ-1150

    • SqlServerConnector doesn’t work with table names with "special characters" DBZ-1153

    Other changes

This release also includes other changes:

    • Describe topic-level settings to ensure event consumption when log compaction is enabled DBZ-1136

    • Upgrade binlog client to 0.19.0 DBZ-1140

    • Upgrade kafkacat to 1.4.0-RC1 DBZ-1148

    • Upgrade Avro connector version to 5.1.2 DBZ-1156

    • Upgrade to Kafka 2.1.1 DBZ-1157

    Release 0.9.1.Final (February 13th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.1.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Provide new container image with tooling for examples and demos DBZ-1125

    Fixes

    This release includes the following fixes:

    • BigDecimal has mismatching scale value for given Decimal schema error due to permissive mysql ddl DBZ-983

    • Primary key changes cause UnsupportedOperationException DBZ-997

    • java.lang.IllegalArgumentException: timeout value is negative DBZ-1019

    • Connector consumes huge amount of memory DBZ-1065

• Strings.join() doesn’t apply conversion for first element DBZ-1112

    • NPE if database history filename has no parent folder DBZ-1122

    • Generated columns not supported by DDL parser DBZ-1123

    • Advancing LSN in the first iteration - possible data loss DBZ-1128

    • Incorrect LSN comparison can cause out of order processing DBZ-1131

    Other changes

This release also includes other changes:

    • io.debezium.connector.postgresql.PostgisGeometry shouldn’t use DatatypeConverter DBZ-962

    • Schema change events should be of type ALTER when table is modified DBZ-1121

    • Wal2json ISODateTimeFormatTest fails with a locale other than Locale.ENGLISH DBZ-1126

    Known issues

A potential race condition was identified in the upstream library used for MySQL’s binary log processing. The problem manifests as issue DBZ-1132. If you are affected by it, the proposed workaround is to increase the Kafka Connect configuration options task.shutdown.graceful.timeout.ms and connect.rebalance.timeout.ms. If the problem persists, please disable the keepalive thread via the Debezium configuration option connect.keep.alive.
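
    A sketch of the workaround; the timeout values below are arbitrary examples, the first two options belong in the Kafka Connect worker configuration and the last one in the Debezium MySQL connector configuration:

    # Kafka Connect worker configuration (e.g. connect-distributed.properties);
    # the values are examples only, tune them for your environment
    task.shutdown.graceful.timeout.ms=30000
    connect.rebalance.timeout.ms=60000

    # Debezium MySQL connector configuration: disable the keepalive thread
    # only if the problem persists
    connect.keep.alive=false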

    Release 0.9.0.Final (February 5th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Expose more useful metrics and improve Grafana dashboard DBZ-1040

    Fixes

    This release includes the following fixes:

    • Allow to use drop-slot-on-close option with wal2json DBZ-1111

    • MySqlDdlParser does not support adding multiple partitions in a single ALTER TABLE …​ ADD PARTITION statement DBZ-1113

    • Debezium fails to take a lock during snapshot DBZ-1115

    • Data from Postgres partitioned table written to wrong topic during snapshot DBZ-1118

    Other changes

This release also includes other changes:

    • Clarify whether DDL parser is actually needed for SQL Server connector DBZ-1096

    • Add design description to SqlServerStreamingChangeEventSource DBZ-1097

    • Put out message about missing LSN at WARN level DBZ-1116

    Release 0.9.0.CR1 (January 19th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.CR1 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The SQL Server connector has reworked the semantics of its snapshot modes (DBZ-947).
The SQL Server connector also adds a new field to the offsets written during streaming (DBZ-1090), which can prevent a seamless upgrade between versions; we recommend re-registering and restarting the connector, as sketched below.
The SQL Server connector has also changed the schema names used for message schemas (DBZ-1089): the superfluous database name has been dropped.
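
A minimal sketch of such a re-registration via the Kafka Connect REST API; all connection details below are illustrative, and the snapshot.isolation.mode key reflects the reworked snapshot semantics of DBZ-947 (verify the exact option name and values against the connector documentation):

import requests

CONNECT = "http://localhost:8083"   # Kafka Connect REST endpoint (assumed)
NAME = "sqlserver-connector"        # connector name (assumed)

# Drop the old registration, then register the connector again so that the new
# offset format and message schema names are used from the start.
requests.delete(f"{CONNECT}/connectors/{NAME}")
requests.post(f"{CONNECT}/connectors", json={
    "name": NAME,
    "config": {
        "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
        "database.hostname": "sqlserver",                # illustrative
        "database.port": "1433",
        "database.user": "sa",                           # illustrative
        "database.password": "********",
        "database.dbname": "testDB",                     # illustrative
        "database.server.name": "server1",               # illustrative
        "table.whitelist": "dbo.customers",              # illustrative
        "snapshot.isolation.mode": "repeatable_read",    # per DBZ-947; verify in the docs
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "schema-changes.server1"
    }
})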

    New Features

    • Snapshot isolation level overhaul DBZ-947

    • Kafka docker image - support for topic cleanup policy DBZ-1038

    • Optimize sys.fn_cdc_map_lsn_to_time() calls DBZ-1078

    • Fallback to restart_lsn if confirmed_flush_lsn is not found DBZ-1081

    • table.whitelist option update for an existing connector doesn’t work DBZ-175

    • EmbeddedEngine should allow for more flexible record consumption DBZ-1080

    • Client-side column blacklisting in SQL Server connector DBZ-1067

    • column.propagate.source.type missing scale DBZ-1073

    Fixes

    This release includes the following fixes:

    • ArrayIndexOutOfBoundsException when a column is deleted (Postgres) DBZ-996

    • Messages from tables without PK and with REPLICA IDENTITY FULL DBZ-1029

    • Inconsistent schema name in streaming and snapshotting phase DBZ-1051

    • "watch-topic" and "create-topic" commands fail DBZ-1057

    • Antlr Exception: mismatched input '.' expecting {<EOF>, '--'} DBZ-1059

    • MySQL JDBC Context sets the wrong truststore password DBZ-1062

    • Unsigned smallint column in mysql failing due to out of range error DBZ-1063

    • NULL Values are replaced by default values even in NULLABLE fields DBZ-1064

    • Uninformative "Found previous offset" log DBZ-1066

    • SQL Server connector does not persist LSNs in Kafka DBZ-1069

• [debezium] ERROR: option "include-unchanged-toast" = "0" is unknown DBZ-1083

    • Debezium fails when consuming table without primary key with turned on topic routing DBZ-1086

    • Wrong message key and event used when primary key is updated DBZ-1088

    • Connect schema name is wrong for SQL Server DBZ-1089

    • Incorrect LSN tracking - possible data loss DBZ-1090

    • Race condition in EmbeddedEngine shutdown DBZ-1103

    Other changes

This release also includes other changes:

    • Intermittent failures in RecordsStreamProducerIT#shouldPropagateSourceColumnTypeToSchemaParameter() DBZ-781

    • Assert MongoDB supported versions DBZ-988

    • Describe how to do DDL changes for SQL Server DBZ-993

    • Verify version of wal2json on RDS DBZ-1056

    • Move SQL Server connector to main repo DBZ-1084

    • Don’t enqueue further records when connector is stopping DBZ-1099

    • Race condition in SQLServer tests during snapshot phase DBZ-1101

    • Remove columnNames field from TableImpl DBZ-1105

    • column.propagate.source.type missing scale DBZ-387

    • write catch-up binlog reader DBZ-388

    • changes to Snapshot and Binlog readers to allow for concurrent/partial running DBZ-389

    Release 0.9.0.Beta2 (December 19th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Beta2 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MongoDB CDC event flattening transformation now removes deletion messages by default (DBZ-563); the previous default was to keep them.
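
To keep the old behaviour, the transformation can be configured explicitly. The sketch below assumes the drop.deletes / drop.tombstones options described in DBZ-563 and is illustrative only; verify the option names against the SMT documentation:

# Fragment of a connector configuration applying the MongoDB flattening SMT while
# keeping delete messages (option names are assumptions taken from DBZ-563).
flattening_config = {
    "transforms": "unwrap",
    "transforms.unwrap.type":
        "io.debezium.connector.mongodb.transforms.UnwrapFromMongoDbEnvelope",
    "transforms.unwrap.drop.deletes": "false",      # restore the previous default (assumed option)
    "transforms.unwrap.drop.tombstones": "false",   # keep tombstones as well, if desired (assumed option)
}

These keys would be merged into the configuration of whichever connector applies the transformation.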

    New Features

    • Add support for Oracle 11g DBZ-954

    • UnwrapFromMongoDbEnvelope refactor DBZ-1020

    • Add option for dropping deletes and tombstone events to MongoDB struct recreation SMT DBZ-563

    • Expose "snapshot.delay.ms" option for all connectors DBZ-966

    • Convey original operation type when using flattening SMTs DBZ-971

    • Provide last event and captured tables in metrics DBZ-978

    • Skip MySQL BinLog Event in case of Invalid Cell Values DBZ-1010

    Fixes

    This release includes the following fixes:

    • BinaryLogClient can’t disconnect when adding records after shutdown has been initiated DBZ-604

    • UnwrapFromMongoDbEnvelope fails when encountering $unset operator DBZ-612

    • "no known snapshots" error when DBs rows are large DBZ-842

    • MongoDB connector stops processing oplog events after encountering "new primary" event DBZ-848

    • MySQL active-passive: brief data loss on failover when Debezium encounters new GTID channel DBZ-923

    • ConnectException: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine DBZ-960

    • ConnectException during ALTER TABLE for non-whitelisted table DBZ-977

    • UnwrapFromMongoDbEnvelope fails when encountering full updates DBZ-987

    • UnwrapFromMongoDbEnvelope fails when encountering Tombstone messages DBZ-989

    • Postgres schema changes detection (not-null constraint) DBZ-1000

    • NPE in SqlServerConnectorTask#cleanupResources() if connector failed to start DBZ-1002

    • Explicitly initialize history topic in HistorizedRelationalDatabaseSchema DBZ-1003

    • BinlogReader ignores GTIDs for empty database DBZ-1005

    • NPE in MySqlConnectorTask.stop() DBZ-1006

    • The name of captured but not whitelisted table is not logged DBZ-1007

    • GTID set is not properly initialized after DB failover DBZ-1008

• Postgres Connector fails on non-nullable MACADDR field during initial snapshot DBZ-1009

    • Connector crashes with java.lang.NullPointerException when using multiple sinks to consume the messages DBZ-1017

    • Postgres connector fails upon event of recently deleted table DBZ-1021

    • ORA-46385: DML and DDL operations are not allowed on table "AUDSYS"."AUD$UNIFIED" DBZ-1023

    • Postgres plugin does not signal the end of snapshot properly DBZ-1024

    • MySQL Antlr runtime.NoViableAltException DBZ-1028

    • Debezium 0.8.2 and 0.8.3.Final Not Available on Confluent Hub DBZ-1030

    • Snapshot of tables with reserved names fails DBZ-1031

    • UnwrapFromMongoDbEnvelope doesn’t support operation header on tombstone messages DBZ-1032

• MySQL binlog reader loses data if the task restarts when the last binlog event is a QUERY event DBZ-1033

    • The same capture instance name is logged twice DBZ-1047

    Other changes

This release also includes other changes:

    • MySQL 8 compatibility DBZ-688

    • Don’t hard code list of supported MySQL storage engines in Antlr grammar DBZ-992

    • Provide updated KSQL example DBZ-999

    • Update to Kafka 2.1 DBZ-1001

• Skip Antlr tests when tests are skipped DBZ-1004

    • Fix expected records counts in MySQL tests DBZ-1016

    • Cannot run tests against Kafka 1.x DBZ-1037

    • Configure MySQL Matrix testing job to test with and without GTID DBZ-1050

    Release 0.9.0.Beta1 (November 20th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.0.1 and has been tested with version 2.0.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Beta1 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MySQL connector now uses the Antlr-based DDL parser by default.
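
If the new parser cannot handle a particular DDL statement, the connector of this era still allowed switching back to the legacy parser. The fragment below assumes the ddl.parser.mode option exposed by the 0.8.x/0.9.x MySQL connector; treat it as a sketch and verify against the connector documentation:

# Illustrative fragment of a MySQL connector configuration selecting the legacy DDL parser
# (the ddl.parser.mode option and its "legacy" value are assumptions to verify).
mysql_parser_fallback = {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "ddl.parser.mode": "legacy",   # the default in this release is the Antlr-based parser
}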

    New Features

    • Add STATUS_STORAGE_TOPIC environment variable to container images DBZ-893

    • Support Postgres 11 in Decoderbufs DBZ-955

    • Define the data directory where tests are storing their data DBZ-963

    • Upgrade Kafka to 2.0.1 DBZ-979

    • Implement unified metrics across connectors DBZ-776

    • Initial snapshot using snapshot isolation level DBZ-941

    • Add decimal.handling.mode for SQLServer Configuration DBZ-953

    • Support pass-through of "database." properties to JDBC driver DBZ-964

    • Handle changes of table definitions and tables created while streaming DBZ-812

    Fixes

    This release includes the following fixes:

    • Error while parsing JSON column type for MySQL DBZ-935

    • wal2json CITEXT columns set to empty strings DBZ-937

    • Base docker image is deprecated DBZ-939

    • Mysql connector failed to parse add partition statement DBZ-959

    • PostgreSQL replication slots not updated in transactions DBZ-965

    • wal2json_streaming decoder does not provide the right plugin name DBZ-970

    • Create topics command doesn’t work in Kafka docker image DBZ-976

    • Antlr parser: support quoted engine names in DDL DBZ-990

    Other changes

This release also includes other changes:

    • Switch to Antlr-based parser implementation by default DBZ-757

    • Support RENAME column syntax from MySQL 8.0 DBZ-780

    • Fix documentation of 'array.encoding' option DBZ-925

    • Support MongoDB 4.0 DBZ-974

    Release 0.9.0.Alpha2 (October 4th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.0.0 and has been tested with version 2.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Alpha2 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MySQL JDBC driver was upgraded to version 8.x, and Kafka has been upgraded to version 2.0.0.

    New Features

    • Build Alpine Linux versions of the PostgreSQL containers DBZ-705

• Refactor methods to read MySQL system variables DBZ-849

    • Correct param name for excludeColumns(String fullyQualifiedTableNames) DBZ-854

    • Make BinlogReader#informAboutUnknownTableIfRequired() log with tableId DBZ-855

    • MySQL identifier with dot or space could not be parsed DBZ-878

    • Use postgres:10 instead of postgres:10.0 as base docker image DBZ-929

    • Support temporary replication slots with Postgres >= 10 DBZ-934

    • Support white/black-listing Mongo fields DBZ-633

    • Postgres connector - add database, schema and table names to "source" section of records DBZ-866

    • Support renaming Mongo fields DBZ-881

    • use tcpKeepAlive by default DBZ-895

    • Hstore support in Postgresql-connector DBZ-898

    • Add connector type to source info DBZ-918

    Fixes

    This release includes the following fixes:

• Global read lock not released when an exception is raised during snapshot DBZ-769

    • Abort loops in MongoPrimary#execute() if the connector is stopped DBZ-784

    • Initial synchronization is not interrupted DBZ-838

    • Kafka database history miscounting attempts even if there are more database history records to consume DBZ-853

    • Schema_only snapshot on idle server - offsets not stored after snapshot DBZ-859

    • DDL parsing in MySQL - default value of primary key is set to null DBZ-860

    • Antlr DDL parser exception for "create database …​ CHARSET=…​" DBZ-864

    • Error when MongoDB collection contains characters not compatible with kafka topic naming DBZ-865

    • AlterTableParserListener does not remove column definition listeners DBZ-869

    • MySQL parser does not recognize 0 as default value for date/time DBZ-870

    • Antlr parser ignores table whitelist filter DBZ-872

    • A new column might not be added with ALTER TABLE antlr parser DBZ-877

    • MySQLConnectorTask always reports it has the required Binlog file from MySQL DBZ-880

    • Execution of RecordsStreamProducer.closeConnections() is susceptible to race condition DBZ-887

    • Watch-topic command in docker image uses unsupported parameter DBZ-890

    • SQLServer should use only schema and table name in table naming DBZ-894

    • Prevent resending of duplicate change events after restart DBZ-897

    • PostgresConnection.initTypeRegistry() takes ~24 mins DBZ-899

    • java.time.format.DateTimeParseException: Text '1970-01-01 00:00:00' in mysql ALTER DBZ-901

    • org.antlr.v4.runtime.NoViableAltException on CREATE DEFINER=web@% PROCEDURE `…​ DBZ-903

    • MySQL default port is wrong in tutorial link DBZ-904

    • RecordsStreamProducer should report refresh of the schema due to different column count DBZ-907

    • MongoDbConnector returns obsolete config values during validation DBZ-908

    • Can’t parse create definition on the mysql connector DBZ-910

    • RecordsStreamProducer#columnValues() does not take into account unchanged TOASTed columns, refreshing table schemas unnecessarily DBZ-911

    • Wrong type in timeout call for Central wait release DBZ-914

    • Exception while parsing table schema with invalid default value for timestamp field DBZ-927

    • Discard null fields in MongoDB event flattening SMT DBZ-928

    Other changes

This release also includes other changes:

    • Create Travis CI build for debezium-incubator repository DBZ-817

    • Cache prepared statements in JdbcConnection DBZ-819

    • Upgrade to Kafka 2.0.0 DBZ-858

• Upgrade SQL Server image to CU9 GDR2 release DBZ-873

    • Speed-up Travis builds using parallel build DBZ-874

    • Add version format check into the release pipeline DBZ-884

    • Handle non-complete list of plugins DBZ-885

    • Parametrize wait time for Maven central sync DBZ-889

    • Assert non-empty release in release script DBZ-891

    • Upgrade Postgres driver to 42.2.5 DBZ-912

    • Upgrade MySQL JDBC driver to version 8.0.x DBZ-763

    • Upgrade MySQL binlog connector DBZ-764

    Release 0.9.0.Alpha1 (July 26th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 1.1.1 and has been tested with version 1.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Alpha1 from any of the earlier 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The Oracle connector was storing the event timestamp in the source block in the field ts_sec. The timestamp is in fact measured in milliseconds, so the field was renamed to ts_ms.
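
Consumers that read the source block can bridge the rename with a small fallback; the event layout below is a simplified sketch and the surrounding field names are illustrative:

def source_timestamp_ms(event: dict) -> int:
    """Return the source timestamp of a change event in milliseconds.

    Handles both the renamed ts_ms field and the older ts_sec field, which,
    despite its name, already carried a millisecond value."""
    source = event.get("payload", {}).get("source", {})
    if "ts_ms" in source:
        return source["ts_ms"]
    return source.get("ts_sec", 0)   # pre-rename field name; value is already in milliseconds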

    New Features

    • Ingest change data from SQL Server databases DBZ-40

    • Oracle connector implementation cont’d (initial snapshotting etc.) DBZ-716

    • Implement initial snapshotting for Oracle DBZ-720

    • Implement capturing of streamed changes DBZ-787

    • Implement initial snapshotting for SQL Server DBZ-788

    • Emit NUMBER columns as Int32/Int64 if precision and scale allow DBZ-804

    • Support heartbeat messages for Oracle DBZ-815

    • Upgrade to Kafka 1.1.1 DBZ-829

    Fixes

    This release includes the following fixes:

    • Offset remains with "snapshot" set to true after completing schema only snapshot DBZ-803

    • Misleading timestamp field name DBZ-795

    • Adjust scale of decimal values to column’s scale if present DBZ-818

    • Avoid NPE if commit is called before any offset is prepared DBZ-826

    Other changes

This release also includes other changes:

    • Make DatabaseHistory set-up code re-usable DBZ-816

    • Use TableFilter contract instead of Predicate<TableId> DBZ-793

    • Expand SourceInfo DBZ-719

    • Provide Maven module and Docker set-up DBZ-786

    • Avoid a few raw type warnings DBZ-801

\ No newline at end of file
+ Release Notes for Debezium 0.9

    Release Notes for Debezium 0.9

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 0.9.5.Final (May 2nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.2.0 and has been tested with version 2.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.5.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.
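
As an illustration of verifying that a connector resumed after such an upgrade (the worker URL and connector name are assumptions, not part of these notes):

import requests

CONNECT = "http://localhost:8083"   # Kafka Connect REST endpoint (assumed)
NAME = "inventory-connector"        # connector name (assumed)

# After the 0.9.5.Final plugin files are installed and the connector is restarted,
# the status endpoint should report the connector and all of its tasks as RUNNING.
status = requests.get(f"{CONNECT}/connectors/{NAME}/status").json()
print(status["connector"]["state"], [task["state"] for task in status["tasks"]])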

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Upgrade to Kafka 2.2.0 DBZ-1227

    • Ability to specify batch size during snapshot DBZ-1247

    • Postgresql ARRAY support DBZ-1076

    • Add support macaddr and macaddr8 PostgreSQL column types DBZ-1193

    Fixes

    This release includes the following fixes:

    • Failing to specify value for database.server.name results in invalid Kafka topic name DBZ-212

    • Escape sequence handling needs to be unified DBZ-481

    • Postgres Connector times out in schema discovery for DBs with many tables DBZ-1214

    • Oracle connector: JDBC transaction can only capture single DML record DBZ-1223

    • Enable enumeration options to contain escaped characters or commas. DBZ-1226

    • Antlr parser fails on column named with MODE keyword DBZ-1233

    • Lost precision for timestamp with timezone DBZ-1236

• NullPointerException due to optional value for commitTime DBZ-1241

    • Default value for datetime(0) is incorrectly handled DBZ-1243

    • Postgres connector failing because empty state data is being stored in offsets topic DBZ-1245

    • Default value for Bit does not work for larger values DBZ-1249

    • Microsecond precision is lost when reading timetz data from Postgres. DBZ-1260

    Other changes

This release also includes other changes:

    • Zookeeper image documentation does not describe txns mountpoint DBZ-1231

    • Parse enum and set options with Antlr DBZ-739

    Release 0.9.4.Final (April 11th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.4.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add MySQL Connector metric to expose "number of filtered events" DBZ-1206

    • Support TLS 1.2 for MySQL DBZ-1208

    • Create new MysqlConnector metric exposing if the connector is tracking offsets using GTIDs or not. DBZ-1221

    • Add support for columns of type INET DBZ-1189

    Fixes

    This release includes the following fixes:

    • Incorrect value for datetime field for '0001-01-01 00:00:00' DBZ-1143

• PostgreSQL DecoderBufs crash when working with geometries in "public" schema DBZ-1144

• [postgres] differing logic between snapshot and streams for create record DBZ-1163

    • Error while deserializing binlog event DBZ-1191

    • MySQL connector throw an exception when captured invalid datetime DBZ-1194

    • Error when alter Enum column with CHARACTER SET DBZ-1203

    • Mysql: Getting ERROR Failed due to error: connect.errors.ConnectException: For input string: "false" DBZ-1204

    • MySQL connection timeout after bootstrapping a new table DBZ-1207

    • SLF4J usage issues DBZ-1212

    • JDBC Connection Not Closed in MySQL Connector Snapshot Reader DBZ-1218

    • Support FLOAT(p) column definition style DBZ-1220

    Other changes

This release also includes other changes:

    • Add WhitespaceAfter check to Checkstyle DBZ-362

    • Document RDS Postgres wal_level behavior DBZ-1219

    Release 0.9.3.Final (March 25th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.3.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Support Outbox SMT as part of Debezium core DBZ-1169

    • Add support for partial recovery from lost slot in postgres DBZ-1082

    Fixes

    This release includes the following fixes:

• PostgreSQL snapshot with a table that has > 8192 records hangs DBZ-1161

    • HStores fail to Snapshot properly DBZ-1162

    • NullPointerException When there are multiple tables in different schemas in the whitelist DBZ-1166

    • Cannot set offset.flush.interval.ms via docker entrypoint DBZ-1167

    • Missing Oracle OCI library is not reported as error DBZ-1170

    • RecordsStreamProducer forgets to convert commitTime from nanoseconds to microseconds DBZ-1174

    • MongoDB Connector doesn’t fail on invalid hosts configuration DBZ-1177

    • Handle NPE errors when trying to create history topic against confluent cloud DBZ-1179

    • The Postgres wal2json streaming and non-streaming decoders do not process empty events DBZ-1181

    • Can’t continue after snapshot is done DBZ-1184

    • ParsingException for SERIAL keyword DBZ-1185

    • STATS_SAMPLE_PAGES config cannot be parsed DBZ-1186

    • MySQL Connector generates false alarm for empty password DBZ-1188

    Other changes

This release also includes other changes:

    • Ensure no brace-less if() blocks are used in the code base DBZ-1039

    • Align Oracle DDL parser code to use the same structure as MySQL DBZ-1192

    Release 0.9.2.Final (February 22nd, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.1 and has been tested with version 2.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.2.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add snapshotting mode NEVER for MongoDB connector DBZ-867

    • Allow passing of arbitrary parameters when replication slot is started DBZ-1130

    Fixes

    This release includes the following fixes:

    • Integer default value for DECIMAL column fails with Avro Converter DBZ-1077

    • connect binds only to hostname interface DBZ-1108

    • Connector fails to connect to binlog on connectors rebalance, throws ServerException DBZ-1132

    • Fail to parse MySQL TIME with values bigger than 23:59:59.999999 DBZ-1137

    • Test dependencies shouldn’t be part of the SQL Server connector archive DBZ-1138

    • Emit correctly-typed fallback values for replica identity DEFAULT DBZ-1141

    • Unexpected exception while streaming changes from row with unchanged toast DBZ-1146

    • SQL syntax error near '"gtid_purged"' DBZ-1147

    • Postgres delete operations throwing DataException DBZ-1149

    • Antlr parser fails on column names that are keywords DBZ-1150

    • SqlServerConnector doesn’t work with table names with "special characters" DBZ-1153

    Other changes

This release also includes other changes:

    • Describe topic-level settings to ensure event consumption when log compaction is enabled DBZ-1136

    • Upgrade binlog client to 0.19.0 DBZ-1140

    • Upgrade kafkacat to 1.4.0-RC1 DBZ-1148

    • Upgrade Avro connector version to 5.1.2 DBZ-1156

    • Upgrade to Kafka 2.1.1 DBZ-1157

    Release 0.9.1.Final (February 13th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.1.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Provide new container image with tooling for examples and demos DBZ-1125

    Fixes

    This release includes the following fixes:

    • BigDecimal has mismatching scale value for given Decimal schema error due to permissive mysql ddl DBZ-983

    • Primary key changes cause UnsupportedOperationException DBZ-997

    • java.lang.IllegalArgumentException: timeout value is negative DBZ-1019

    • Connector consumes huge amount of memory DBZ-1065

• Strings.join() doesn’t apply conversion for first element DBZ-1112

    • NPE if database history filename has no parent folder DBZ-1122

    • Generated columns not supported by DDL parser DBZ-1123

    • Advancing LSN in the first iteration - possible data loss DBZ-1128

    • Incorrect LSN comparison can cause out of order processing DBZ-1131

    Other changes

This release also includes other changes:

    • io.debezium.connector.postgresql.PostgisGeometry shouldn’t use DatatypeConverter DBZ-962

    • Schema change events should be of type ALTER when table is modified DBZ-1121

    • Wal2json ISODateTimeFormatTest fails with a locale other than Locale.ENGLISH DBZ-1126

    Known issues

A potential race condition was identified in the upstream library used for MySQL binary log processing; it manifests as issue DBZ-1132. If you are affected by it, the proposed workaround is to increase the Kafka Connect configuration options task.shutdown.graceful.timeout.ms and connect.rebalance.timeout.ms. If the problem persists, please disable the keepalive thread via the Debezium configuration option connect.keep.alive.
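
As a non-authoritative sketch of that workaround (the worker URL, connector name, and timeout values below are illustrative assumptions):

import requests

CONNECT = "http://localhost:8083"   # Kafka Connect REST endpoint (assumed)
NAME = "inventory-connector"        # affected MySQL connector (assumed)

# The two worker-level options mentioned above belong in the Connect worker
# configuration (e.g. connect-distributed.properties), for example:
#   task.shutdown.graceful.timeout.ms=30000   (illustrative value)
#   connect.rebalance.timeout.ms=60000        (illustrative value)

# If the problem persists, disable the keepalive thread on the connector itself:
config = requests.get(f"{CONNECT}/connectors/{NAME}/config").json()
config["connect.keep.alive"] = "false"
requests.put(f"{CONNECT}/connectors/{NAME}/config", json=config)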

    Release 0.9.0.Final (February 5th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Final from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Expose more useful metrics and improve Grafana dashboard DBZ-1040

    Fixes

    This release includes the following fixes:

    • Allow to use drop-slot-on-close option with wal2json DBZ-1111

    • MySqlDdlParser does not support adding multiple partitions in a single ALTER TABLE …​ ADD PARTITION statement DBZ-1113

    • Debezium fails to take a lock during snapshot DBZ-1115

    • Data from Postgres partitioned table written to wrong topic during snapshot DBZ-1118

    Other changes

This release also includes other changes:

    • Clarify whether DDL parser is actually needed for SQL Server connector DBZ-1096

    • Add design description to SqlServerStreamingChangeEventSource DBZ-1097

    • Put out message about missing LSN at WARN level DBZ-1116

    Release 0.9.0.CR1 (January 19th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.CR1 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The SQL Server connector has reworked the semantics of its snapshot modes (DBZ-947).
The SQL Server connector also adds a new field to the offsets written during streaming (DBZ-1090), which can prevent a seamless upgrade between versions; we recommend re-registering and restarting the connector, as sketched below.
The SQL Server connector has also changed the schema names used for message schemas (DBZ-1089): the superfluous database name has been dropped.
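
A minimal sketch of such a re-registration via the Kafka Connect REST API; all connection details below are illustrative, and the snapshot.isolation.mode key reflects the reworked snapshot semantics of DBZ-947 (verify the exact option name and values against the connector documentation):

import requests

CONNECT = "http://localhost:8083"   # Kafka Connect REST endpoint (assumed)
NAME = "sqlserver-connector"        # connector name (assumed)

# Drop the old registration, then register the connector again so that the new
# offset format and message schema names are used from the start.
requests.delete(f"{CONNECT}/connectors/{NAME}")
requests.post(f"{CONNECT}/connectors", json={
    "name": NAME,
    "config": {
        "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
        "database.hostname": "sqlserver",                # illustrative
        "database.port": "1433",
        "database.user": "sa",                           # illustrative
        "database.password": "********",
        "database.dbname": "testDB",                     # illustrative
        "database.server.name": "server1",               # illustrative
        "table.whitelist": "dbo.customers",              # illustrative
        "snapshot.isolation.mode": "repeatable_read",    # per DBZ-947; verify in the docs
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "schema-changes.server1"
    }
})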

    New Features

    • Snapshot isolation level overhaul DBZ-947

    • Kafka docker image - support for topic cleanup policy DBZ-1038

    • Optimize sys.fn_cdc_map_lsn_to_time() calls DBZ-1078

    • Fallback to restart_lsn if confirmed_flush_lsn is not found DBZ-1081

    • table.whitelist option update for an existing connector doesn’t work DBZ-175

    • EmbeddedEngine should allow for more flexible record consumption DBZ-1080

    • Client-side column blacklisting in SQL Server connector DBZ-1067

    • column.propagate.source.type missing scale DBZ-1073

    Fixes

    This release includes the following fixes:

    • ArrayIndexOutOfBoundsException when a column is deleted (Postgres) DBZ-996

    • Messages from tables without PK and with REPLICA IDENTITY FULL DBZ-1029

    • Inconsistent schema name in streaming and snapshotting phase DBZ-1051

    • "watch-topic" and "create-topic" commands fail DBZ-1057

    • Antlr Exception: mismatched input '.' expecting {<EOF>, '--'} DBZ-1059

    • MySQL JDBC Context sets the wrong truststore password DBZ-1062

    • Unsigned smallint column in mysql failing due to out of range error DBZ-1063

    • NULL Values are replaced by default values even in NULLABLE fields DBZ-1064

    • Uninformative "Found previous offset" log DBZ-1066

    • SQL Server connector does not persist LSNs in Kafka DBZ-1069

• [debezium] ERROR: option "include-unchanged-toast" = "0" is unknown DBZ-1083

    • Debezium fails when consuming table without primary key with turned on topic routing DBZ-1086

    • Wrong message key and event used when primary key is updated DBZ-1088

    • Connect schema name is wrong for SQL Server DBZ-1089

    • Incorrect LSN tracking - possible data loss DBZ-1090

    • Race condition in EmbeddedEngine shutdown DBZ-1103

    Other changes

This release also includes other changes:

    • Intermittent failures in RecordsStreamProducerIT#shouldPropagateSourceColumnTypeToSchemaParameter() DBZ-781

    • Assert MongoDB supported versions DBZ-988

    • Describe how to do DDL changes for SQL Server DBZ-993

    • Verify version of wal2json on RDS DBZ-1056

    • Move SQL Server connector to main repo DBZ-1084

    • Don’t enqueue further records when connector is stopping DBZ-1099

    • Race condition in SQLServer tests during snapshot phase DBZ-1101

    • Remove columnNames field from TableImpl DBZ-1105

    • column.propagate.source.type missing scale DBZ-387

    • write catch-up binlog reader DBZ-388

    • changes to Snapshot and Binlog readers to allow for concurrent/partial running DBZ-389

    Release 0.9.0.Beta2 (December 19th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.1.0 and has been tested with version 2.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Beta2 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MongoDB CDC event flattening transformation now removes deletion messages by default (DBZ-563); the previous default was to keep them.
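
To keep the old behaviour, the transformation can be configured explicitly. The sketch below assumes the drop.deletes / drop.tombstones options described in DBZ-563 and is illustrative only; verify the option names against the SMT documentation:

# Fragment of a connector configuration applying the MongoDB flattening SMT while
# keeping delete messages (option names are assumptions taken from DBZ-563).
flattening_config = {
    "transforms": "unwrap",
    "transforms.unwrap.type":
        "io.debezium.connector.mongodb.transforms.UnwrapFromMongoDbEnvelope",
    "transforms.unwrap.drop.deletes": "false",      # restore the previous default (assumed option)
    "transforms.unwrap.drop.tombstones": "false",   # keep tombstones as well, if desired (assumed option)
}

These keys would be merged into the configuration of whichever connector applies the transformation.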

    New Features

    • Add support for Oracle 11g DBZ-954

    • UnwrapFromMongoDbEnvelope refactor DBZ-1020

    • Add option for dropping deletes and tombstone events to MongoDB struct recreation SMT DBZ-563

    • Expose "snapshot.delay.ms" option for all connectors DBZ-966

    • Convey original operation type when using flattening SMTs DBZ-971

    • Provide last event and captured tables in metrics DBZ-978

    • Skip MySQL BinLog Event in case of Invalid Cell Values DBZ-1010

    Fixes

    This release includes the following fixes:

    • BinaryLogClient can’t disconnect when adding records after shutdown has been initiated DBZ-604

    • UnwrapFromMongoDbEnvelope fails when encountering $unset operator DBZ-612

    • "no known snapshots" error when DBs rows are large DBZ-842

    • MongoDB connector stops processing oplog events after encountering "new primary" event DBZ-848

    • MySQL active-passive: brief data loss on failover when Debezium encounters new GTID channel DBZ-923

    • ConnectException: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine DBZ-960

    • ConnectException during ALTER TABLE for non-whitelisted table DBZ-977

    • UnwrapFromMongoDbEnvelope fails when encountering full updates DBZ-987

    • UnwrapFromMongoDbEnvelope fails when encountering Tombstone messages DBZ-989

    • Postgres schema changes detection (not-null constraint) DBZ-1000

    • NPE in SqlServerConnectorTask#cleanupResources() if connector failed to start DBZ-1002

    • Explicitly initialize history topic in HistorizedRelationalDatabaseSchema DBZ-1003

    • BinlogReader ignores GTIDs for empty database DBZ-1005

    • NPE in MySqlConnectorTask.stop() DBZ-1006

    • The name of captured but not whitelisted table is not logged DBZ-1007

    • GTID set is not properly initialized after DB failover DBZ-1008

• Postgres Connector fails on non-nullable MACADDR field during initial snapshot DBZ-1009

    • Connector crashes with java.lang.NullPointerException when using multiple sinks to consume the messages DBZ-1017

    • Postgres connector fails upon event of recently deleted table DBZ-1021

    • ORA-46385: DML and DDL operations are not allowed on table "AUDSYS"."AUD$UNIFIED" DBZ-1023

    • Postgres plugin does not signal the end of snapshot properly DBZ-1024

    • MySQL Antlr runtime.NoViableAltException DBZ-1028

    • Debezium 0.8.2 and 0.8.3.Final Not Available on Confluent Hub DBZ-1030

    • Snapshot of tables with reserved names fails DBZ-1031

    • UnwrapFromMongoDbEnvelope doesn’t support operation header on tombstone messages DBZ-1032

• MySQL binlog reader loses data if the task restarts when the last binlog event is a QUERY event DBZ-1033

    • The same capture instance name is logged twice DBZ-1047

    Other changes

This release also includes other changes:

    • MySQL 8 compatibility DBZ-688

    • Don’t hard code list of supported MySQL storage engines in Antlr grammar DBZ-992

    • Provide updated KSQL example DBZ-999

    • Update to Kafka 2.1 DBZ-1001

• Skip Antlr tests when tests are skipped DBZ-1004

    • Fix expected records counts in MySQL tests DBZ-1016

    • Cannot run tests against Kafka 1.x DBZ-1037

    • Configure MySQL Matrix testing job to test with and without GTID DBZ-1050

    Release 0.9.0.Beta1 (November 20th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.0.1 and has been tested with version 2.0.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Beta1 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MySQL connector now uses the Antlr-based DDL parser by default.
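
If the new parser cannot handle a particular DDL statement, the connector of this era still allowed switching back to the legacy parser. The fragment below assumes the ddl.parser.mode option exposed by the 0.8.x/0.9.x MySQL connector; treat it as a sketch and verify against the connector documentation:

# Illustrative fragment of a MySQL connector configuration selecting the legacy DDL parser
# (the ddl.parser.mode option and its "legacy" value are assumptions to verify).
mysql_parser_fallback = {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "ddl.parser.mode": "legacy",   # the default in this release is the Antlr-based parser
}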

    New Features

    • Add STATUS_STORAGE_TOPIC environment variable to container images DBZ-893

    • Support Postgres 11 in Decoderbufs DBZ-955

    • Define the data directory where tests are storing their data DBZ-963

    • Upgrade Kafka to 2.0.1 DBZ-979

    • Implement unified metrics across connectors DBZ-776

    • Initial snapshot using snapshot isolation level DBZ-941

    • Add decimal.handling.mode for SQLServer Configuration DBZ-953

    • Support pass-through of "database." properties to JDBC driver DBZ-964

    • Handle changes of table definitions and tables created while streaming DBZ-812

    Fixes

    This release includes the following fixes:

    • Error while parsing JSON column type for MySQL DBZ-935

    • wal2json CITEXT columns set to empty strings DBZ-937

    • Base docker image is deprecated DBZ-939

    • Mysql connector failed to parse add partition statement DBZ-959

    • PostgreSQL replication slots not updated in transactions DBZ-965

    • wal2json_streaming decoder does not provide the right plugin name DBZ-970

    • Create topics command doesn’t work in Kafka docker image DBZ-976

    • Antlr parser: support quoted engine names in DDL DBZ-990

    Other changes

This release also includes other changes:

    • Switch to Antlr-based parser implementation by default DBZ-757

    • Support RENAME column syntax from MySQL 8.0 DBZ-780

    • Fix documentation of 'array.encoding' option DBZ-925

    • Support MongoDB 4.0 DBZ-974

    Release 0.9.0.Alpha2 (October 4th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 2.0.0 and has been tested with version 2.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Alpha2 from any of the earlier 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The MySQL JDBC driver was upgraded to version 8.x, and Kafka has been upgraded to version 2.0.0.

    New Features

    • Build Alpine Linux versions of the PostgreSQL containers DBZ-705

• Refactor methods to read MySQL system variables DBZ-849

    • Correct param name for excludeColumns(String fullyQualifiedTableNames) DBZ-854

    • Make BinlogReader#informAboutUnknownTableIfRequired() log with tableId DBZ-855

    • MySQL identifier with dot or space could not be parsed DBZ-878

    • Use postgres:10 instead of postgres:10.0 as base docker image DBZ-929

    • Support temporary replication slots with Postgres >= 10 DBZ-934

    • Support white/black-listing Mongo fields DBZ-633

    • Postgres connector - add database, schema and table names to "source" section of records DBZ-866

    • Support renaming Mongo fields DBZ-881

    • use tcpKeepAlive by default DBZ-895

    • Hstore support in Postgresql-connector DBZ-898

    • Add connector type to source info DBZ-918

    Fixes

    This release includes the following fixes:

• Global read lock not released when an exception is raised during snapshot DBZ-769

    • Abort loops in MongoPrimary#execute() if the connector is stopped DBZ-784

    • Initial synchronization is not interrupted DBZ-838

    • Kafka database history miscounting attempts even if there are more database history records to consume DBZ-853

    • Schema_only snapshot on idle server - offsets not stored after snapshot DBZ-859

    • DDL parsing in MySQL - default value of primary key is set to null DBZ-860

    • Antlr DDL parser exception for "create database …​ CHARSET=…​" DBZ-864

    • Error when MongoDB collection contains characters not compatible with kafka topic naming DBZ-865

    • AlterTableParserListener does not remove column definition listeners DBZ-869

    • MySQL parser does not recognize 0 as default value for date/time DBZ-870

    • Antlr parser ignores table whitelist filter DBZ-872

    • A new column might not be added with ALTER TABLE antlr parser DBZ-877

    • MySQLConnectorTask always reports it has the required Binlog file from MySQL DBZ-880

    • Execution of RecordsStreamProducer.closeConnections() is susceptible to race condition DBZ-887

    • Watch-topic command in docker image uses unsupported parameter DBZ-890

    • SQLServer should use only schema and table name in table naming DBZ-894

    • Prevent resending of duplicate change events after restart DBZ-897

    • PostgresConnection.initTypeRegistry() takes ~24 mins DBZ-899

    • java.time.format.DateTimeParseException: Text '1970-01-01 00:00:00' in mysql ALTER DBZ-901

    • org.antlr.v4.runtime.NoViableAltException on CREATE DEFINER=web@% PROCEDURE `…​ DBZ-903

    • MySQL default port is wrong in tutorial link DBZ-904

    • RecordsStreamProducer should report refresh of the schema due to different column count DBZ-907

    • MongoDbConnector returns obsolete config values during validation DBZ-908

    • Can’t parse create definition on the mysql connector DBZ-910

    • RecordsStreamProducer#columnValues() does not take into account unchanged TOASTed columns, refreshing table schemas unnecessarily DBZ-911

    • Wrong type in timeout call for Central wait release DBZ-914

    • Exception while parsing table schema with invalid default value for timestamp field DBZ-927

    • Discard null fields in MongoDB event flattening SMT DBZ-928

    Other changes

This release also includes other changes:

    • Create Travis CI build for debezium-incubator repository DBZ-817

    • Cache prepared statements in JdbcConnection DBZ-819

    • Upgrade to Kafka 2.0.0 DBZ-858

• Upgrade SQL Server image to CU9 GDR2 release DBZ-873

    • Speed-up Travis builds using parallel build DBZ-874

    • Add version format check into the release pipeline DBZ-884

    • Handle non-complete list of plugins DBZ-885

    • Parametrize wait time for Maven central sync DBZ-889

    • Assert non-empty release in release script DBZ-891

    • Upgrade Postgres driver to 42.2.5 DBZ-912

    • Upgrade MySQL JDBC driver to version 8.0.x DBZ-763

    • Upgrade MySQL binlog connector DBZ-764

    Release 0.9.0.Alpha1 (July 26th, 2018)

    Kafka compatibility

    This release has been built against Kafka Connect 1.1.1 and has been tested with version 1.1.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, or PostgreSQL connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 0.9.0.Alpha1 from any of the earlier 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 0.9.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 0.9.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The Oracle connector was storing the event timestamp in the source block in the field ts_sec. The timestamp is in fact measured in milliseconds, so the field was renamed to ts_ms.
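
Consumers that read the source block can bridge the rename with a small fallback; the event layout below is a simplified sketch and the surrounding field names are illustrative:

def source_timestamp_ms(event: dict) -> int:
    """Return the source timestamp of a change event in milliseconds.

    Handles both the renamed ts_ms field and the older ts_sec field, which,
    despite its name, already carried a millisecond value."""
    source = event.get("payload", {}).get("source", {})
    if "ts_ms" in source:
        return source["ts_ms"]
    return source.get("ts_sec", 0)   # pre-rename field name; value is already in milliseconds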

    New Features

    • Ingest change data from SQL Server databases DBZ-40

    • Oracle connector implementation cont’d (initial snapshotting etc.) DBZ-716

    • Implement initial snapshotting for Oracle DBZ-720

    • Implement capturing of streamed changes DBZ-787

    • Implement initial snapshotting for SQL Server DBZ-788

    • Emit NUMBER columns as Int32/Int64 if precision and scale allow DBZ-804

    • Support heartbeat messages for Oracle DBZ-815

    • Upgrade to Kafka 1.1.1 DBZ-829

    Fixes

    This release includes the following fixes:

    • Offset remains with "snapshot" set to true after completing schema only snapshot DBZ-803

    • Misleading timestamp field name DBZ-795

    • Adjust scale of decimal values to column’s scale if present DBZ-818

    • Avoid NPE if commit is called before any offset is prepared DBZ-826

    Other changes

This release also includes other changes:

    • Make DatabaseHistory set-up code re-usable DBZ-816

    • Use TableFilter contract instead of Predicate<TableId> DBZ-793

    • Expand SourceInfo DBZ-719

    • Provide Maven module and Docker set-up DBZ-786

    • Avoid a few raw type warnings DBZ-801

\ No newline at end of file
diff --git a/releases/1.0/index.html b/releases/1.0/index.html
index 846b6e514e..fc65f1656d 100644
--- a/releases/1.0/index.html
+++ b/releases/1.0/index.html
@@ -1 +1 @@
- Debezium Release Series 1.0

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.13
    Driver: 8.0.16
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 3.11.1
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.9
    Oracle Database: 11g, 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file + Debezium Release Series 1.0

    \ No newline at end of file diff --git a/releases/1.0/release-notes.html b/releases/1.0/release-notes.html index c2506d96f7..39ba9b1498 100644 --- a/releases/1.0/release-notes.html +++ b/releases/1.0/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 1.0

    Release Notes for Debezium 1.0

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.0.3.Final (March 12th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.3.Final from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • ExtractNewRecordState - add.source.fields should strip spaces from comma-separated list of fields DBZ-1772

    • Add ability to insert fields from op field in ExtractNewRecordState SMT DBZ-1452

    Fixes

    This release includes the following fixes:

    • Debezium skips messages after restart DBZ-1824

    • Unable to listen to binlogs for tables with a period in the table names DBZ-1834

    • Redundant calls to refresh schema when using user defined types in PostgreSQL DBZ-1849

    • postgres oid is too large to cast to integer DBZ-1850

    Other changes

    This release also includes the following changes:

    • Test on top of AMQ Streams DBZ-924

    • Verify correctness of JMX metrics DBZ-1664

    • Test with AMQ Streams 1.4 connector operator DBZ-1714

    • hstore.handling.mode docs seem inaccurate (and map shows null values) DBZ-1758

    • Misleading warning message about uncommitted offsets DBZ-1840

    • Modularize tutorial DBZ-1845

    • Modularize the monitoring doc DBZ-1851

    • Document PostgreSQL connector metrics DBZ-1858

    Release 1.0.2.Final (February 27th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.2.Final from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The default value of the MySQL config option gtid.new.channel.position was originally latest, a value that should never be used in production. The default has therefore been changed to earliest, and the config option is scheduled for removal (DBZ-1705). The MySQL config option event.deserialization.failure.handling.mode was renamed to event.processing.failure.handling.mode to make the naming consistent with other connectors (DBZ-1826).
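
    As a hedged illustration (a hypothetical configuration fragment, expressed as Java Properties such as those passed to the embedded engine; the values are examples only), the renamed option would now be set like this:

        import java.util.Properties;

        public class MySqlConnectorConfigExample {
            public static void main(String[] args) {
                Properties config = new Properties();
                // Renamed in this release; the former name was event.deserialization.failure.handling.mode
                config.setProperty("event.processing.failure.handling.mode", "fail");
                // "earliest" is now the default; the option itself is scheduled for removal
                config.setProperty("gtid.new.channel.position", "earliest");
                System.out.println(config);
            }
        }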

    New Features

    • Add option to skip unprocessable event DBZ-1760

    Fixes

    This release includes the following fixes:

    • Postgres Connector losing data on restart due to commit() being called before events produced to Kafka DBZ-1766

    • TINYINT(1) value range restricted on snapshot. DBZ-1773

    • MySQL source connector fails while parsing new AWS RDS internal event DBZ-1775

    • Inconsistency in MySQL TINYINT mapping definition DBZ-1800

    • Supply of message.key.columns disables primary keys. DBZ-1825

    Other changes

    This release also includes the following changes:

    • Backport debezium-testing module to 1.0.x DBZ-1819

    Release 1.0.1.Final (February 7th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.1.Final from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    Before updating the DecoderBufs logical decoding plug-in in your Postgres database to this new version (or when pulling the debezium/postgres container image for that new version), it is necessary to upgrade the Debezium Postgres connector to 1.0.1.Final or 1.1.0.Alpha2 or later (DBZ-1052).

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Make slot creation in PostgreSQL more resilient DBZ-1684

    • Support boolean as default for INT(1) column in MySQL DBZ-1689

    • SIGNAL statement is not recognized by DDL parser DBZ-1691

    • When using in embedded mode MYSQL connector fails DBZ-1693

    • Connector error after adding a new not null column to table in Postgres DBZ-1698

    • MySQL connector fails to parse trigger DDL DBZ-1699

    • MySQL connector doesn’t use default value of connector.port DBZ-1712

    • ANTLR parser cannot parse MariaDB Table DDL with TRANSACTIONAL attribute DBZ-1733

    • Postgres connector does not support proxied connections DBZ-1738

    • GET DIAGNOSTICS statement not parseable DBZ-1740

    • MySql password logged out in debug log level DBZ-1748

    Other changes

    This release also includes the following changes:

    • Add tests for using fallback values with default REPLICA IDENTITY DBZ-1158

    • Migrate all attribute name/value pairs to Antora component descriptors DBZ-1687

    • Remove overlap of different documentation config files DBZ-1729

    • Don’t fail upon receiving unknown operation events DBZ-1747

    • Upgrade to Mongo Java Driver version 3.12.1 DBZ-1761

    Release 1.0.0.Final (December 18th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.0.Final from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The incubating SerDes type io.debezium.serde.Serdes introduced in Debezium 1.0.0.CR1 has been renamed into io.debezium.serde.DebeziumSerdes to avoid conflicting with the Apache Kafka type of the same simple name (DBZ-1670).

    Like other relational connectors, the MySQL connector now supports the option snapshot.lock.timeout.ms, defaulting to a timeout of 10 seconds. When upgrading a connector and doing new snapshots, this timeout may now apply, whereas previously the connector would have waited indefinitely to obtain the required locks. In that case, adjust the timeout to your specific requirements (DBZ-1671).
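
    A minimal sketch of using the renamed serde from a Kafka Streams application (the Customer class is a hypothetical value type; verify the from.field option against the serde documentation for your version):

        import java.util.Collections;
        import org.apache.kafka.common.serialization.Serde;
        import io.debezium.serde.DebeziumSerdes;

        public class CustomerSerdeExample {

            // Hypothetical POJO matching the "after" part of the change event payload
            public static class Customer {
                public long id;
                public String name;
            }

            public static Serde<Customer> customerSerde() {
                // Formerly io.debezium.serde.Serdes; renamed to DebeziumSerdes in this release
                Serde<Customer> serde = DebeziumSerdes.payloadJson(Customer.class);
                // Deserialize only the "after" state from the envelope
                serde.configure(Collections.singletonMap("from.field", "after"), false);
                return serde;
            }
        }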

    New Features

    • Support streaming changes from SQL Server "AlwaysOn" replica DBZ-1642

    Fixes

    This release includes the following fixes:

    • Interpret Sql Server timestamp timezone correctly DBZ-1643

    • Sorting a HashSet only to put it back into a HashSet DBZ-1650

    • Function with RETURN only statement cannot be parsed DBZ-1659

    • Enum value resolution not working while streaming with wal2json or pgoutput DBZ-1680

    Other changes

    This release also includes the following changes:

    • Globally ensure in tests that records can be serialized DBZ-824

    • Allow upstream testsuite to run with productised dependencies DBZ-1658

    • Upgrade to latest PostgreSQL driver 42.2.9 DBZ-1660

    • Generate warning for connectors with automatically dropped slots DBZ-1666

    • Regression test for MySQL dates in snapshot being off by one DBZ-1667

    • Rename Serdes to DebeziumSerdes DBZ-1670

    • Build against Apache Kafka 2.4 DBZ-1676

    • When PostgreSQL schema refresh fails, allow error to include root cause DBZ-1677

    • Prepare testsuite for RHEL 8 protobuf plugin RPM DBZ-1536

    Release 1.0.0.CR1 (December 10th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.1 and has been tested with version 2.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.0.CR1 from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    For the SQL Server and Oracle connectors, the snapshot mode initial_schema_only has been deprecated and will be removed in a future version. Please use schema_only instead (DBZ-585).
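
    For example (a hypothetical configuration fragment, shown as Java Properties), the deprecated value would be replaced as follows:

        import java.util.Properties;

        public class SnapshotModeExample {
            public static void main(String[] args) {
                Properties config = new Properties();
                // "initial_schema_only" is deprecated for the SQL Server and Oracle connectors
                config.setProperty("snapshot.mode", "schema_only");
                System.out.println(config);
            }
        }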

    New Features

    • Transaction level TRANSACTION_READ_COMMITTED not implemented DBZ-1480

    • Provide change event JSON Serde for Kafka Streams DBZ-1533

    • Provide MongoDB 4.2 image DBZ-1626

    • Support PostgreSQL enum types DBZ-920

    • Upgrade container images to Java 11 DBZ-969

    • Support MongoDB 4.0 transaction DBZ-1215

    • Make connection timeout configurable in MySQL connection URL DBZ-1632

    • Support for arrays of uuid (_uuid) DBZ-1637

    • Add test matrix for SQL Server DBZ-1644

    Fixes

    This release includes the following fixes:

    • Empty history topic treated as not existing DBZ-1201

    • Incorrect handling of type alias DBZ-1413

    • Blacklisted columns are not being filtered out when generating a Kafka message from a CDC event DBZ-1617

    • IoUtil Bugfix DBZ-1621

    • VariableLatch Bugfix DBZ-1622

    • The oracle connector scans too many objects while attempting to determine the most recent ddl time DBZ-1631

    • Connector does not update its state correctly when processing compound ALTER statement DBZ-1645

    • Outbox event router shouldn’t lower-case topic names DBZ-1648

    Other changes

    This release also includes the following changes:

    • Consolidate configuration parameters DBZ-585

    • Merge the code for upscaling decimal values with scale lower than defined DBZ-825

    • Make Debezium project Java 11 compatible DBZ-1402

    • Run SourceClear DBZ-1602

    • Extend MySQL to test Enum with column.propagate.source.type DBZ-1636

    • Sticky ToC hides tables in PG connector docs DBZ-1652

    • Antora generates build warning DBZ-1654

    Release 1.0.0.Beta3 (November 14th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.1 and has been tested with version 2.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.0.Beta3 from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.0.Beta3 plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.0.Beta3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The configuration parameter drop_on_stop of the PostgreSQL connector has been renamed to drop.on.stop (DBZ-1595) to make it consistent with other parameter names.

    New Features

    • Standardize source info for Cassandra connector DBZ-1408

    • Clarify presence of old values when not using REPLICA IDENTITY FULL DBZ-1518

    • Propagate replicator exception so failure reason is available from Connect DBZ-1583

    • Envelope methods should accept Instant instead of long for "ts" parameter DBZ-1607

    Fixes

    This release includes the following fixes:

    • Debezium Erroneously Reporting No Tables to Capture DBZ-1519

    • Debezium Oracle connector attempting to analyze tables DBZ-1569

    • Null values in "before" are populated with "__debezium_unavailable_value" DBZ-1570

    • Postgresql 11+ pgoutput plugin error with truncate DBZ-1576

    • Regression of postgres Connector times out in schema discovery for DBs with many tables DBZ-1579

    • The ts_ms value is not correct during the snapshot processing DBZ-1588

    • LogInterceptor is not thread-safe DBZ-1590

    • Heartbeats are not generated for non-whitelisted tables DBZ-1592

    • Config tombstones.on.delete is missing from SQL Server Connector configDef DBZ-1593

    • AWS RDS Performance Insights screwed a little by non-closed statement in "SELECT COUNT(1) FROM pg_publication" DBZ-1596

    • Update Postgres documentation to use ts_ms instead of ts_usec DBZ-1610

    • Exception while trying snapshot schema of non-whitelisted table DBZ-1613

    Other changes

    This release also includes the following changes:

    • Auto-format source code upon build DBZ-1392

    • Update documentation based on Technology Preview DBZ-1543

    • Reduce size of Postgres container images DBZ-1549

    • Debezium should not use SHARE UPDATE EXCLUSIVE MODE locks DBZ-1559

    • Allows tags to be passed to CI jobs DBZ-1578

    • Upgrade MongoDB driver to 3.11 DBZ-1597

    • Run formatter validation in Travis CI DBZ-1603

    • Place formatting rules into Maven module DBZ-1605

    • Upgrade to Kafka 2.3.1 DBZ-1612

    • Allow per-connector setting for schema/catalog precedence in TableId use DBZ-1555

    Release 1.0.0.Beta2 (October 24th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.0.Beta2 from any of the earlier 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Update tooling image to use latest kafkacat DBZ-1522

    • Validate configured replication slot names DBZ-1525

    • Make password field to be hidden for MS SQL connector DBZ-1554

    • Raise a warning about growing backlog DBZ-1565

    • Support Postgres LTREE columns DBZ-1336

    Fixes

    This release includes the following fixes:

    • Aborting snapshot due to error when last running 'UNLOCK TABLES': Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine. DBZ-1428

    • MySQL Connector fails to parse DDL containing the keyword VISIBLE for index definitions DBZ-1534

    • MySQL connector fails to parse DDL - GRANT SESSION_VARIABLES_ADMIN…​ DBZ-1535

    • Mysql connector: The primary key cannot reference a non-existent column 'id' in table '*' DBZ-1560

    • Incorrect source struct’s collection field when dot is present in collection name DBZ-1563

    • Transaction left open after db snapshot DBZ-1564

    Other changes

    This release also includes the following changes:

    • Add Postgres 12 to testing matrix DBZ-1542

    • Update Katacoda learning experience DBZ-1548

    Release 1.0.0.Beta1 (October 17th, 2019)

    Kafka compatibility

    This release has been built against Kafka Connect 2.3.0 and has been tested with version 2.3.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.0.0.Beta1 from any of the earlier 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.0.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.0.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The ExtractNewDocumentState and EventRouter SMTs now propagate any heartbeat or schema change messages unchanged instead of dropping them as before. This is to ensure consistency with the ExtractNewRecordState SMT (DBZ-1513).

    The new Postgres connector option interval.handling.mode allows you to control whether INTERVAL columns are exported as microseconds (the previous behavior, which remains the default) or as ISO 8601 formatted strings (DBZ-1498). The following upgrade order must be maintained when existing connectors capture INTERVAL columns:

    1. Upgrade the Debezium Kafka Connect Postgres connector

    2. Upgrade the logical decoding plug-in installed in the database

    3. (Optionally) switch interval.handling.mode to string

    In particular, avoid upgrading the logical decoding plug-in before the connector, as doing so would cause no value to be exported for INTERVAL columns.
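
    Once both the connector and the plug-in are upgraded, switching to string output is a single configuration change; a hypothetical Java Properties sketch:

        import java.util.Properties;

        public class IntervalHandlingExample {
            public static void main(String[] args) {
                Properties config = new Properties();
                // The default keeps the previous microseconds behavior;
                // "string" emits ISO 8601 formatted intervals instead
                config.setProperty("interval.handling.mode", "string");
                System.out.println(config);
            }
        }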

    New Features

    • Provide alternative mapping for INTERVAL DBZ-1498

    • Ensure message keys have correct field order DBZ-1507

    • Image incorrect on Deploying Debezium on OpenShift DBZ-1545

    • Indicate table locking issues in log DBZ-1280

    Fixes

    This release includes the following fixes:

    • Debezium fails to snapshot large databases DBZ-685

    • Connector Postgres runs out of disk space DBZ-892

    • Debezium-MySQL Connector Fails while parsing AWS RDS internal events DBZ-1492

    • MongoDB ExtractNewDocumentState SMT blocks heartbeat messages DBZ-1513

    • pgoutput string decoding depends on JVM default charset DBZ-1532

    • Whitespaces not stripped from table.whitelist DBZ-1546

    Other changes

    This release also includes the following changes:

    • Upgrade to latest JBoss Parent POM DBZ-675

    • CheckStyle: Flag missing whitespace DBZ-1341

    • Upgrade to the latest Checkstyle plugin DBZ-1355

    • CheckStyle: no code after closing braces DBZ-1391

    • Add "adopters" file DBZ-1460

    • Add Google Analytics to Antora-published pages DBZ-1526

    • Create 0.10 RPM for postgres-decoderbufs DBZ-1540

    • Postgres documentation fixes DBZ-1544

    \ No newline at end of file + Release Notes for Debezium 1.0

    \ No newline at end of file diff --git a/releases/1.1/index.html b/releases/1.1/index.html index 9dcc08504f..80b2a336a5 100644 --- a/releases/1.1/index.html +++ b/releases/1.1/index.html @@ -1 +1 @@ - Debezium Release Series 1.1

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.13
    Driver: 8.0.16
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 3.12.2
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.9
    Oracle Database: 11g, 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0
    Db2 Database: 11.5
    Driver: 11.5.0.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file + Debezium Release Series 1.1

    \ No newline at end of file diff --git a/releases/1.1/release-notes.html b/releases/1.1/release-notes.html index 0cb41745d5..a648c553eb 100644 --- a/releases/1.1/release-notes.html +++ b/releases/1.1/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 1.1

    Release Notes for Debezium 1.1

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.1.2.Final (June 2nd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.2.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The Db2 and Oracle connectors were including the database name, which is fixed for a given connector, in table filter configurations. This has been simplified so that only <schema_name>.<table_name> naming is used. The Db2, Oracle, and SQL Server connectors likewise included the fixed database name in column/key filter and mapper configurations; this has been simplified so that only <schema_name>.<table_name>.<column_name> naming is used (DBZ-1312).

    The Db2 connector provided an initial_schema_only snapshot mode. This name was not consistent with other connectors, so the mode has been renamed to schema_only (DBZ-2051).
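
    A hedged illustration of the simplified naming (hypothetical schema, table, and column names; filter property names as used in the 1.1 series):

        import java.util.Properties;

        public class TableFilterExample {
            public static void main(String[] args) {
                Properties config = new Properties();
                // Table filters now use <schema_name>.<table_name>, without the fixed database name
                config.setProperty("table.whitelist", "INVENTORY.CUSTOMERS,INVENTORY.ORDERS");
                // Column filters and mappers use <schema_name>.<table_name>.<column_name>
                config.setProperty("column.blacklist", "INVENTORY.CUSTOMERS.SSN");
                System.out.println(config);
            }
        }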

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Outbox Quarkus Extension throws NPE in quarkus:dev mode DBZ-1966

    • [Doc] Debezium User Guide should provide example of DB connector yaml and deployment instructions DBZ-2011

    • MySQL alias FLUSH TABLE not handled DBZ-2047

    • DDL statement throws error if compression keyword contains backticks (``) DBZ-2062

    • Error and connector stops when DDL contains algorithm=instant DBZ-2067

    • Unable to parse MySQL ALTER statement with named primary key DBZ-2080

    Other changes

    This release also includes the following changes:

    • Adding tests and doc updates around column masking and truncating DBZ-775

    • Align snapshot/streaming semantics in MongoDB documentation DBZ-1901

    • Only top-level menu items shown for MySQL connector 1.1 docs DBZ-1980

    • Upgrade to Apache Kafka 2.5.0 and Confluent Platform 5.5.0 DBZ-1981

    • Fix broken link DBZ-1983

    • Avoid broken cross-book references in downstream docs DBZ-1999

    • Fix wrong attribute name in MongoDB connector DBZ-2006

    • Remove additional Jackson dependencies as of AK 2.5 DBZ-2076

    • Tar packages must use posix format DBZ-2088

    • Upgrade to Quarkus 1.5.0.Final DBZ-2119

    Release 1.1.1.Final (April 17th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.1 and has been tested with version 2.4.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.1.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Improve documentation on WAL disk space usage for Postgres connector DBZ-1732

    • Add docs for mask column and truncate column features DBZ-1954

    • Snapshot metrics shows TotalNumberOfEventsSeen as zero DBZ-1932

    Fixes

    This release includes the following fixes:

    • Snapshot lock timeout setting is not documented DBZ-1914

    • AvroRuntimeException when publishing transaction metadata DBZ-1915

    • Connector restart logic throttles for the first 2 seconds DBZ-1918

    • Wal2json empty change event could cause NPE above version 1.0.3.final DBZ-1922

    • Broken internal links and anchors in documentation DBZ-1935

    • Documentation files in modules create separate pages, should be partials instead DBZ-1944

    • Validation of binlog_row_image is not compatible with MySQL 5.5 DBZ-1950

    • High CPU usage when idle DBZ-1960

    Other changes

    This release also includes the following changes:

    • Fix typo in Quarkus Outbox extension documentation DBZ-1902

    • Documentation should link to Apache Kafka upstream docs DBZ-1906

    • Restore documentation of MySQL event structures DBZ-1919

    • Update snapshot.mode options in SQL Server documentation DBZ-1924

    • Remove obsolete metrics from downstream docs DBZ-1947

    Release 1.1.0.Final (March 23rd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • The Postgres connector heartbeat should optionally write back a heartbeat change to the DB DBZ-1815

    Fixes

    This release includes the following fixes:

    • Postgres Connector ignoring confirmed_flush_lsn and skipping ahead to latest txn DBZ-1730

    • Postgresql money error handling DBZ-1755

    • MongoDB tests not working correctly DBZ-1867

    • MongoDB transaction metadata topic generates extra events DBZ-1874

    • NullPointerException on delete in ExtractNewRecordState class DBZ-1876

    • MongoDB connector unrecoverable exception DBZ-1880

    • High log volume from: "Awaiting end of restart backoff period" logs DBZ-1889

    • Kafka records from one Cassandra table get published to the kafka queue of another Cassandra table DBZ-1892

    Other changes

    This release includes also other changes:

    • Use snapshot versions in master branch documentation DBZ-1793

    • Misc docs issues DBZ-1798

    • Outbox Quarkus Extension: Clarify default column types when using defaults. DBZ-1804

    • Create CI job to run OpenShift test DBZ-1817

    • Failing test jobs for Mongo and SQL Server due to insecure maven registry DBZ-1837

    • Support retriable exceptions with embedded engine DBZ-1857

    • Modularize Debezium logging doc DBZ-1861

    • Centralize closing of coordinator DBZ-1863

    • Assert format of commit messages DBZ-1868

    • Bump MongoDB java driver to the latest version 3.12.2 DBZ-1869

    • Add Travis CI task for MongoDB 3.2 DBZ-1871

    • Unstable tests for PostgreSQL DBZ-1875

    • Add MongoDB JMX integration tests DBZ-1879

    Release 1.1.0.CR1 (March 11th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.CR1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The default value of the MySQL config option gtid.new.channel.position was originally latest, a value that should never be used in production. The default has therefore been changed to earliest and the option is scheduled for removal (DBZ-1705). The MySQL config option event.deserialization.failure.handling.mode was renamed to event.processing.failure.handling.mode to make the naming consistent with other connectors (DBZ-1826). The MongoDB config option field.renames used to add the renamed field (with a null value) even when the source field was missing; this was identified as a defect and non-present fields are no longer added (DBZ-1848).
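
    As a minimal, non-authoritative sketch of the rename, the following Java snippet builds a MySQL connector configuration that uses the new option name; all other settings are omitted and the chosen value (fail) is only an example.

        import java.util.Properties;

        public class MySqlFailureHandlingConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
                // Renamed option (DBZ-1826); the old name was event.deserialization.failure.handling.mode.
                config.setProperty("event.processing.failure.handling.mode", "fail");
                // gtid.new.channel.position now defaults to earliest and is scheduled for removal (DBZ-1705),
                // so it is simply left unset here.
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }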

    New Features

    • Distinguish between public (API) and internal parts of Debezium DBZ-234

    • Default gtid.new.channel.position to earliest DBZ-1705

    • Add option to skip unprocessable event DBZ-1760

    • ExtractNewRecordState - add.source.fields should strip spaces from comma-separated list of fields DBZ-1772

    • Add support for update events for sharded MongoDB collections DBZ-1781

    • Useless/meaningless parameter in function DBZ-1805

    • Replace BlockEventQueue with Debezium ChangeEventQueue DBZ-1820

    • Option to configure column.propagate.source.type on a per-type basis, not per column-name basis DBZ-1830

    • Support MongoDB Oplog operations as config DBZ-1831

    • Add app metrics for mongodb connector to jmx DBZ-845

    • Provide SPI to override schema and value conversion for specific columns DBZ-1134

    • Retry polling on configured exceptions DBZ-1723

    Fixes

    This release includes the following fixes:

    • CDC Event Schema Doesn’t Change After 2 Fields Switch Names and Places DBZ-1694

    • TINYINT(1) value range restricted on snapshot. DBZ-1773

    • MySQL source connector fails while parsing new AWS RDS internal event DBZ-1775

    • Connector fails when performing a Hot Schema Update in SQLServer (Data row is smaller than a column index). DBZ-1778

    • Inconsistency in MySQL TINYINT mapping definition DBZ-1800

    • Debezium skips messages after restart DBZ-1824

    • Supply of message.key.columns disables primary keys. DBZ-1825

    • MySql connector fails after CREATE TABLE IF NOT EXISTS table_A, given table_A does exist already DBZ-1833

    • Unable to listen to binlogs for tables with a period in the table names DBZ-1834

    • Mongodb field.renames will add renamed field even when source field is missing DBZ-1848

    • Redundant calls to refresh schema when using user defined types in PostgreSQL DBZ-1849

    • postgres oid is too large to cast to integer DBZ-1850

    Other changes

    This release includes also other changes:

    • Verify correctness of JMX metrics DBZ-1664

    • Document that server name option must not use hyphen in name DBZ-1704

    • Move MongoDB connector to base framework DBZ-1726

    • hstore.handling.mode docs seem inaccurate (and map shows null values) DBZ-1758

    • Document transaction metadata topic name DBZ-1779

    • Remove Microsoft references in Db2 connector comments DBZ-1794

    • Fix link to CONTRIBUTE.md in debezium-incubator repository README.md DBZ-1795

    • Invalid dependency definition in Quarkus ITs DBZ-1799

    • Document MySQL boolean handling DBZ-1801

    • Jackson dependency shouldn’t be optional in Testcontainers module DBZ-1803

    • Change Db2 configuration for faster test execution DBZ-1809

    • MySQL: Rename event.deserialization.failure.handling.mode to event.processing.failure.handling.mode DBZ-1826

    • Misleading warning message about uncommitted offsets DBZ-1840

    • Missing info on DB2 connector in incubator README file DBZ-1842

    • Only replace log levels if LOG_LEVEL var is set DBZ-1843

    • Modularize tutorial DBZ-1845

    • Modularize the monitoring doc DBZ-1851

    • Remove deprecated methods from SnapshotProgressListener DBZ-1856

    • Document PostgreSQL connector metrics DBZ-1858

    Release 1.1.0.Beta2 (February 13th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Beta2 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add ability to insert fields from op field in ExtractNewRecordState SMT DBZ-1452

    • Integrates with TestContainers project DBZ-1722

    Fixes

    This release includes the following fixes:

    • Postgres Connector losing data on restart due to commit() being called before events produced to Kafka DBZ-1766

    • DBREF fields causes SchemaParseException using New Record State Extraction SMT and Avro converter DBZ-1767

    Other changes

    This release includes also other changes:

    • Superfluous whitespace in intra-level ToC sidebar DBZ-1668

    • Outbox Quarkus Extension follow-up tasks DBZ-1711

    • DB2 connector follow-up tasks DBZ-1752

    • Unwrap SMT demo not compatible with ES 6.1+ DBZ-1756

    • Unstable SQL Server test DBZ-1764

    • Remove Db2 JDBC driver from assembly package DBZ-1776

    • Fix PostgresConnectorIT.shouldOutputRecordsInCloudEventsFormat test DBZ-1783

    • Use "application/avro" as data content type in CloudEvents DBZ-1784

    • Update Standard Tutorials/Examples with DB2 DBZ-1558

    Release 1.1.0.Beta1 (February 5th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Beta1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    Before updating the DecoderBufs logical decoding plug-in in your Postgres database to this new version (or when pulling the debezium/postgres container image for that new version), it is necessary to upgrade the Debezium Postgres connector to 1.0.1.Final, 1.1.0.Alpha2, or later (DBZ-1052).

    The ExtractNewDocumentState SMT used with the Debezium MongoDB connector now converts Date and Timestamp fields into the org.apache.kafka.connect.data.Timestamp logical type, clarifying their semantics. The schema type itself remains unchanged as int64. Please note that the resolution of Timestamp is seconds, as per the semantics of that type in MongoDB (DBZ-1717).
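
    A minimal sketch of the corresponding SMT configuration, expressed as Java properties, is shown below; the transform alias unwrap is an arbitrary illustrative name and the surrounding connector options are omitted.

        import java.util.Properties;

        public class MongoUnwrapTimestampConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("transforms", "unwrap"); // arbitrary alias for the SMT
                config.setProperty("transforms.unwrap.type",
                        "io.debezium.connector.mongodb.transforms.ExtractNewDocumentState");
                // With DBZ-1717, Date/Timestamp fields emitted by this SMT carry the
                // org.apache.kafka.connect.data.Timestamp logical type; the schema type stays int64
                // and MongoDB Timestamp values keep second resolution.
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }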

    New Features

    • Create a plug-in for DB2 streaming DBZ-695

    • Add topic routing by field option for New Record State Extraction DBZ-1715

    • Generate date(time) field types in the Kafka Connect data structure DBZ-1717

    • Publish TX boundary markers on a TX metadata topic DBZ-1052

    • Replace connectorName with kafkaTopicPrefix in kafka key/value schema DBZ-1763

    Fixes

    This release includes the following fixes:

    • Connector error after adding a new not null column to table in Postgres DBZ-1698

    • MySQL connector doesn’t use default value of connector.port DBZ-1712

    • Fix broken images in Antora and brush up AsciiDoc DBZ-1725

    • ANTLR parser cannot parse MariaDB Table DDL with TRANSACTIONAL attribute DBZ-1733

    • Postgres connector does not support proxied connections DBZ-1738

    • GET DIAGNOSTICS statement not parseable DBZ-1740

    • Examples use http access to Maven repos which is no longer available DBZ-1741

    • MySql password logged out in debug log level DBZ-1748

    • Cannot shutdown PostgreSQL if there is an active Debezium connector DBZ-1727

    Other changes

    This release includes also other changes:

    • Add tests for using fallback values with default REPLICA IDENTITY DBZ-1158

    • Migrate all attribute name/value pairs to Antora component descriptors DBZ-1687

    • Upgrade to Awestruct 0.6.0 DBZ-1719

    • Run CI tests for delivered non-connector modules (like Quarkus) DBZ-1724

    • Remove overlap of different documentation config files DBZ-1729

    • Don’t fail upon receiving unknown operation events DBZ-1747

    • Provide a method to identify an envelope schema DBZ-1751

    • Upgrade to Mongo Java Driver version 3.12.1 DBZ-1761

    • Create initial Proposal for DB2 Source Connector DBZ-1509

    • Review Pull Request for DB2 Connector DBZ-1527

    • Test Set up of the DB2 Test Instance DBZ-1556

    • Create Documentation for the DB2 Connector DBZ-1557

    • Verify support of all DB2 types DBZ-1708

    Release 1.1.0.Alpha1 (January 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Alpha1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    When using the outbox event routing SMT and configuring a column from which to obtain the Kafka record timestamp (the table.field.event.timestamp option), that value could previously be exported as milliseconds, microseconds, or nanoseconds, depending on the source column’s definition. As of this release, the timestamp is always exported as milliseconds (DBZ-1707).

    The deprecated Postgres connector option slot.drop_on_stop has been removed; use slot.drop.on.stop instead (DBZ-1600).
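
    As an illustrative sketch, the snippet below builds a PostgreSQL connector configuration that uses the remaining dotted option name; the value shown (false) is only an example and all other settings are omitted.

        import java.util.Properties;

        public class PostgresSlotConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                // Only the dotted spelling remains valid; slot.drop_on_stop was removed (DBZ-1600).
                config.setProperty("slot.drop.on.stop", "false");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }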

    New Features

    • MongoDB authentication against non-admin authsource DBZ-1168

    • Oracle: Add support for different representations of "NUMBER" Data Type DBZ-1552

    • Update Mongo Java driver to version 3.12.0 DBZ-1690

    • Support exporting change events in "CloudEvents" format DBZ-1292

    • Build Quarkus extension facilitating implementations of the outbox pattern DBZ-1478

    • Support column masking option for Postgres DBZ-1685

    Fixes

    This release includes the following fixes:

    • Make slot creation in PostgreSQL more resilient DBZ-1684

    • SQLserver type time(4)…time(7) lost nanoseconds DBZ-1688

    • Support boolean as default for INT(1) column in MySQL DBZ-1689

    • SIGNAL statement is not recognized by DDL parser DBZ-1691

    • MySQL connector fails when used in embedded mode DBZ-1693

    • MySQL connector fails to parse trigger DDL DBZ-1699

    Other changes

    This release includes also other changes:

    • Update outbox routing example DBZ-1673

    • Add option to JSON change event SerDe for ignoring unknown properties DBZ-1703

    • Update debezium/awestruct image to use Antora 2.3 alpha 2 DBZ-1713

    \ No newline at end of file
    + Release Notes for Debezium 1.1

    Release Notes for Debezium 1.1

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.1.2.Final (June 2nd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.2.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The Db2 and Oracle connectors used to include the database name, which is fixed per connector, when configuring table filters. This has been simplified so that only <schema_name>.<table_name> naming is used. Likewise, the Db2, Oracle, and SQL Server connectors used to include the database name when configuring column/key filters and mappers; these now use only <schema_name>.<table_name>.<column_name> naming (DBZ-1312).

    The Db2 connector used to provide an initial_schema_only snapshot mode. As this name was not consistent with the other connectors, the mode has been renamed to schema_only (DBZ-2051).
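
    The Java sketch below illustrates the new naming; the schema, table and column names (MYSCHEMA.ORDERS, INTERNAL_NOTE) are hypothetical examples, and the whitelist/blacklist style property names are the ones commonly used by this connector series.

        import java.util.Properties;

        public class Db2FilterConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.db2.Db2Connector");
                // Table filters no longer carry the (fixed) database name (DBZ-1312):
                // previously something like MYDB.MYSCHEMA.ORDERS, now schema.table only.
                config.setProperty("table.whitelist", "MYSCHEMA.ORDERS");
                // Column filters and mappers follow <schema>.<table>.<column>.
                config.setProperty("column.blacklist", "MYSCHEMA.ORDERS.INTERNAL_NOTE");
                // Snapshot mode renamed from initial_schema_only to schema_only (DBZ-2051).
                config.setProperty("snapshot.mode", "schema_only");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }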

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Outbox Quarkus Extension throws NPE in quarkus:dev mode DBZ-1966

    • [Doc] Debezium User Guide should provide example of DB connector yaml and deployment instructions DBZ-2011

    • MySQL alias FLUSH TABLE not handled DBZ-2047

    • DDL statement throws error if compression keyword contains backticks (``) DBZ-2062

    • Error and connector stops when DDL contains algorithm=instant DBZ-2067

    • Unable to parse MySQL ALTER statement with named primary key DBZ-2080

    Other changes

    This release includes also other changes:

    • Adding tests and doc updates around column masking and truncating DBZ-775

    • Align snapshot/streaming semantics in MongoDB documentation DBZ-1901

    • Only top-level menu items shown for MySQL connector 1.1 docs DBZ-1980

    • Upgrade to Apache Kafka 2.5.0 and Confluent Platform 5.5.0 DBZ-1981

    • Fix broken link DBZ-1983

    • Avoid broken cross-book references in downstream docs DBZ-1999

    • Fix wrong attribute name in MongoDB connector DBZ-2006

    • Remove additional Jackson dependencies as of AK 2.5 DBZ-2076

    • Tar packages must use posix format DBZ-2088

    • Upgrade to Quarkus 1.5.0.Final DBZ-2119

    Release 1.1.1.Final (April 17th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.1 and has been tested with version 2.4.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.1.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Improve documentation on WAL disk space usage for Postgres connector DBZ-1732

    • Add docs for mask column and truncate column features DBZ-1954

    • Snapshot metrics shows TotalNumberOfEventsSeen as zero DBZ-1932

    Fixes

    This release includes the following fixes:

    • Snapshot lock timeout setting is not documented DBZ-1914

    • AvroRuntimeException when publishing transaction metadata DBZ-1915

    • Connector restart logic throttles for the first 2 seconds DBZ-1918

    • Wal2json empty change event could cause NPE above version 1.0.3.final DBZ-1922

    • Broken internal links and anchors in documentation DBZ-1935

    • Documentation files in modules create separate pages, should be partials instead DBZ-1944

    • Validation of binlog_row_image is not compatible with MySQL 5.5 DBZ-1950

    • High CPU usage when idle DBZ-1960

    Other changes

    This release includes also other changes:

    • Fix typo in Quarkus Outbox extension documentation DBZ-1902

    • Documentation should link to Apache Kafka upstream docs DBZ-1906

    • Restore documentation of MySQL event structures DBZ-1919

    • Update snapshot.mode options in SQL Server documentation DBZ-1924

    • Remove obsolete metrics from downstream docs DBZ-1947

    Release 1.1.0.Final (March 23rd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Final from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • The Postgres connector heartbeat should optionally write back a heartbeat change to the DB DBZ-1815

    Fixes

    This release includes the following fixes:

    • Postgres Connector ignoring confirmed_flush_lsn and skipping ahead to latest txn DBZ-1730

    • Postgresql money error handling DBZ-1755

    • MongoDB tests not working correctly DBZ-1867

    • MongoDB transaction metadata topic generates extra events DBZ-1874

    • NullPointerException on delete in ExtractNewRecordState class DBZ-1876

    • MongoDB connector unrecoverable exception DBZ-1880

    • High log volume from: "Awaiting end of restart backoff period" logs DBZ-1889

    • Kafka records from one Cassandra table get published to the kafka queue of another Cassandra table DBZ-1892

    Other changes

    This release includes also other changes:

    • Use snapshot versions in master branch documentation DBZ-1793

    • Misc docs issues DBZ-1798

    • Outbox Quarkus Extension: Clarify default column types when using defaults. DBZ-1804

    • Create CI job to run OpenShift test DBZ-1817

    • Failing test jobs for Mongo and SQL Server due to insecure maven registry DBZ-1837

    • Support retriable exceptions with embedded engine DBZ-1857

    • Modularize Debezium logging doc DBZ-1861

    • Centralize closing of coordinator DBZ-1863

    • Assert format of commit messages DBZ-1868

    • Bump MongoDB java driver to the latest version 3.12.2 DBZ-1869

    • Add Travis CI task for MongoDB 3.2 DBZ-1871

    • Unstable tests for PostgreSQL DBZ-1875

    • Add MongoDB JMX integration tests DBZ-1879

    Release 1.1.0.CR1 (March 11th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL, SQL Server, Cassandra or DB2 connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.CR1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The default value of the MySQL config option gtid.new.channel.position was originally latest, a value that should never be used in production. The default has therefore been changed to earliest and the option is scheduled for removal (DBZ-1705). The MySQL config option event.deserialization.failure.handling.mode was renamed to event.processing.failure.handling.mode to make the naming consistent with other connectors (DBZ-1826). The MongoDB config option field.renames used to add the renamed field (with a null value) even when the source field was missing; this was identified as a defect and non-present fields are no longer added (DBZ-1848).
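
    As a minimal, non-authoritative sketch of the rename, the following Java snippet builds a MySQL connector configuration that uses the new option name; all other settings are omitted and the chosen value (fail) is only an example.

        import java.util.Properties;

        public class MySqlFailureHandlingConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
                // Renamed option (DBZ-1826); the old name was event.deserialization.failure.handling.mode.
                config.setProperty("event.processing.failure.handling.mode", "fail");
                // gtid.new.channel.position now defaults to earliest and is scheduled for removal (DBZ-1705),
                // so it is simply left unset here.
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }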

    New Features

    • Distinguish between public (API) and internal parts of Debezium DBZ-234

    • Default gtid.new.channel.position to earliest DBZ-1705

    • Add option to skip unprocessable event DBZ-1760

    • ExtractNewRecordState - add.source.fields should strip spaces from comma-separated list of fields DBZ-1772

    • Add support for update events for sharded MongoDB collections DBZ-1781

    • Useless/meaningless parameter in function DBZ-1805

    • Replace BlockEventQueue with Debezium ChangeEventQueue DBZ-1820

    • Option to configure column.propagate.source.type on a per-type basis, not per column-name basis DBZ-1830

    • Support MongoDB Oplog operations as config DBZ-1831

    • Add app metrics for mongodb connector to jmx DBZ-845

    • Provide SPI to override schema and value conversion for specific columns DBZ-1134

    • Retry polling on configured exceptions DBZ-1723

    Fixes

    This release includes the following fixes:

    • CDC Event Schema Doesn’t Change After 2 Fields Switch Names and Places DBZ-1694

    • TINYINT(1) value range restricted on snapshot. DBZ-1773

    • MySQL source connector fails while parsing new AWS RDS internal event DBZ-1775

    • Connector fails when performing a Hot Schema Update in SQLServer (Data row is smaller than a column index). DBZ-1778

    • Inconsistency in MySQL TINYINT mapping definition DBZ-1800

    • Debezium skips messages after restart DBZ-1824

    • Supply of message.key.columns disables primary keys. DBZ-1825

    • MySql connector fails after CREATE TABLE IF NOT EXISTS table_A, given table_A does exist already DBZ-1833

    • Unable to listen to binlogs for tables with a period in the table names DBZ-1834

    • Mongodb field.renames will add renamed field even when source field is missing DBZ-1848

    • Redundant calls to refresh schema when using user defined types in PostgreSQL DBZ-1849

    • postgres oid is too large to cast to integer DBZ-1850

    Other changes

    This release includes also other changes:

    • Verify correctness of JMX metrics DBZ-1664

    • Document that server name option must not use hyphen in name DBZ-1704

    • Move MongoDB connector to base framework DBZ-1726

    • hstore.handling.mode docs seem inaccurate (and map shows null values) DBZ-1758

    • Document transaction metadata topic name DBZ-1779

    • Remove Microsoft references in Db2 connector comments DBZ-1794

    • Fix link to CONTRIBUTE.md in debezium-incubator repository README.md DBZ-1795

    • Invalid dependency definition in Quarkus ITs DBZ-1799

    • Document MySQL boolean handling DBZ-1801

    • Jackson dependency shouldn’t be optional in Testcontainers module DBZ-1803

    • Change Db2 configuration for faster test execution DBZ-1809

    • MySQL: Rename event.deserialization.failure.handling.mode to event.processing.failure.handling.mode DBZ-1826

    • Misleading warning message about uncommitted offsets DBZ-1840

    • Missing info on DB2 connector in incubator README file DBZ-1842

    • Only replace log levels if LOG_LEVEL var is set DBZ-1843

    • Modularize tutorial DBZ-1845

    • Modularize the monitoring doc DBZ-1851

    • Remove deprecated methods from SnapshotProgressListener DBZ-1856

    • Document PostgreSQL connector metrics DBZ-1858

    Release 1.1.0.Beta2 (February 13th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Beta2 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add ability to insert fields from op field in ExtractNewRecordState SMT DBZ-1452

    • Integrates with TestContainers project DBZ-1722

    Fixes

    This release includes the following fixes:

    • Postgres Connector losing data on restart due to commit() being called before events produced to Kafka DBZ-1766

    • DBREF fields causes SchemaParseException using New Record State Extraction SMT and Avro converter DBZ-1767

    Other changes

    This release includes also other changes:

    • Superfluous whitespace in intra-level ToC sidebar DBZ-1668

    • Outbox Quarkus Extension follow-up tasks DBZ-1711

    • DB2 connector follow-up tasks DBZ-1752

    • Unwrap SMT demo not compatible with ES 6.1+ DBZ-1756

    • Unstable SQL Server test DBZ-1764

    • Remove Db2 JDBC driver from assembly package DBZ-1776

    • Fix PostgresConnectorIT.shouldOutputRecordsInCloudEventsFormat test DBZ-1783

    • Use "application/avro" as data content type in CloudEvents DBZ-1784

    • Update Standard Tutorials/Examples with DB2 DBZ-1558

    Release 1.1.0.Beta1 (February 5th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Beta1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    Before updating the DecoderBufs logical decoding plug-in in your Postgres database to this new version (or when pulling the debezium/postgres container image for that new version), it is necessary to upgrade the Debezium Postgres connector to 1.0.1.Final, 1.1.0.Alpha2, or later (DBZ-1052).

    The ExtractNewDocumentState SMT used with the Debezium MongoDB connector now converts Date and Timestamp fields into the org.apache.kafka.connect.data.Timestamp logical type, clarifying their semantics. The schema type itself remains unchanged as int64. Please note that the resolution of Timestamp is seconds, as per the semantics of that type in MongoDB (DBZ-1717).
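
    A minimal sketch of the corresponding SMT configuration, expressed as Java properties, is shown below; the transform alias unwrap is an arbitrary illustrative name and the surrounding connector options are omitted.

        import java.util.Properties;

        public class MongoUnwrapTimestampConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("transforms", "unwrap"); // arbitrary alias for the SMT
                config.setProperty("transforms.unwrap.type",
                        "io.debezium.connector.mongodb.transforms.ExtractNewDocumentState");
                // With DBZ-1717, Date/Timestamp fields emitted by this SMT carry the
                // org.apache.kafka.connect.data.Timestamp logical type; the schema type stays int64
                // and MongoDB Timestamp values keep second resolution.
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }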

    New Features

    • Create a plug-in for DB2 streaming DBZ-695

    • Add topic routing by field option for New Record State Extraction DBZ-1715

    • Generate date(time) field types in the Kafka Connect data structure DBZ-1717

    • Publish TX boundary markers on a TX metadata topic DBZ-1052

    • Replace connectorName with kafkaTopicPrefix in kafka key/value schema DBZ-1763

    Fixes

    This release includes the following fixes:

    • Connector error after adding a new not null column to table in Postgres DBZ-1698

    • MySQL connector doesn’t use default value of connector.port DBZ-1712

    • Fix broken images in Antora and brush up AsciiDoc DBZ-1725

    • ANTLR parser cannot parse MariaDB Table DDL with TRANSACTIONAL attribute DBZ-1733

    • Postgres connector does not support proxied connections DBZ-1738

    • GET DIAGNOSTICS statement not parseable DBZ-1740

    • Examples use http access to Maven repos which is no longer available DBZ-1741

    • MySql password logged out in debug log level DBZ-1748

    • Cannot shutdown PostgreSQL if there is an active Debezium connector DBZ-1727

    Other changes

    This release includes also other changes:

    • Add tests for using fallback values with default REPLICA IDENTITY DBZ-1158

    • Migrate all attribute name/value pairs to Antora component descriptors DBZ-1687

    • Upgrade to Awestruct 0.6.0 DBZ-1719

    • Run CI tests for delivered non-connector modules (like Quarkus) DBZ-1724

    • Remove overlap of different documentation config files DBZ-1729

    • Don’t fail upon receiving unknown operation events DBZ-1747

    • Provide a method to identify an envelope schema DBZ-1751

    • Upgrade to Mongo Java Driver version 3.12.1 DBZ-1761

    • Create initial Proposal for DB2 Source Connector DBZ-1509

    • Review Pull Request for DB2 Connector DBZ-1527

    • Test Set up of the DB2 Test Instance DBZ-1556

    • Create Documentation for the DB2 Connector DBZ-1557

    • Verify support of all DB2 types DBZ-1708

    Release 1.1.0.Alpha1 (January 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.1.0.Alpha1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions, first check the upgrading notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.1.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.1.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    When using the outbox event routing SMT and configuring a column from which to obtain the Kafka record timestamp (the table.field.event.timestamp option), that value could previously be exported as milliseconds, microseconds, or nanoseconds, depending on the source column’s definition. As of this release, the timestamp is always exported as milliseconds (DBZ-1707).

    The deprecated Postgres connector option slot.drop_on_stop has been removed; use slot.drop.on.stop instead (DBZ-1600).
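
    As an illustrative sketch, the snippet below builds a PostgreSQL connector configuration that uses the remaining dotted option name; the value shown (false) is only an example and all other settings are omitted.

        import java.util.Properties;

        public class PostgresSlotConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                // Only the dotted spelling remains valid; slot.drop_on_stop was removed (DBZ-1600).
                config.setProperty("slot.drop.on.stop", "false");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }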

    New Features

    • MongoDB authentication against non-admin authsource DBZ-1168

    • Oracle: Add support for different representations of "NUMBER" Data Type DBZ-1552

    • Update Mongo Java driver to version 3.12.0 DBZ-1690

    • Support exporting change events in "CloudEvents" format DBZ-1292

    • Build Quarkus extension facilitating implementations of the outbox pattern DBZ-1478

    • Support column masking option for Postgres DBZ-1685

    Fixes

    This release includes the following fixes:

    • Make slot creation in PostgreSQL more resilient DBZ-1684

    • SQLserver type time(4)…time(7) lost nanoseconds DBZ-1688

    • Support boolean as default for INT(1) column in MySQL DBZ-1689

    • SIGNAL statement is not recognized by DDL parser DBZ-1691

    • MySQL connector fails when used in embedded mode DBZ-1693

    • MySQL connector fails to parse trigger DDL DBZ-1699

    Other changes

    This release includes also other changes:

    • Update outbox routing example DBZ-1673

    • Add option to JSON change event SerDe for ignoring unknown properties DBZ-1703

    • Update debezium/awestruct image to use Antora 2.3 alpha 2 DBZ-1713

    \ No newline at end of file
    diff --git a/releases/1.2/index.html b/releases/1.2/index.html
    index 821fdbf383..6370a64eca 100644
    --- a/releases/1.2/index.html
    +++ b/releases/1.2/index.html
    @@ -1 +1 @@
    - Debezium Release Series 1.2

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL: Database 5.7, 8.0.x; Driver 8.0.16
    MongoDB: Database 3.2, 3.4, 3.6, 4.0, 4.2; Driver 3.12.3
    PostgreSQL: Database 9.6, 10, 11, 12; Driver 42.2.12
    Oracle: Database 11g, 12c; Driver 12.2.0.1
    SQL Server: Database 2017, 2019; Driver 7.2.2.jre8
    Cassandra: Database 3.11.4; Driver 3.5.0
    Db2: Database 11.5; Driver 11.5.0.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file
    + Debezium Release Series 1.2

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL: Database 5.7, 8.0.x; Driver 8.0.16
    MongoDB: Database 3.2, 3.4, 3.6, 4.0, 4.2; Driver 3.12.3
    PostgreSQL: Database 9.6, 10, 11, 12; Driver 42.2.12
    Oracle: Database 11g, 12c; Driver 12.2.0.1
    SQL Server: Database 2017, 2019; Driver 7.2.2.jre8
    Cassandra: Database 3.11.4; Driver 3.5.0
    Db2: Database 11.5; Driver 11.5.0.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file
    diff --git a/releases/1.2/release-notes.html b/releases/1.2/release-notes.html
    index 6e4b2a3d17..87a671aafc 100644
    --- a/releases/1.2/release-notes.html
    +++ b/releases/1.2/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 1.2

    Release Notes for Debezium 1.2

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.2.5.Final (September 24th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Fix Quarkus datasource configuration for Quarkus 1.9 DBZ-2558

    Other changes

    This release includes also other changes:

    • Prepare revised SMT docs (filter and content-based routing) for downstream DBZ-2567

    • Swap closing square bracket for curly brace in downstream title annotations DBZ-2577

    Release 1.2.4.Final (September 17th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The SMTs for content-based routing and filtering, both of which use JSR 223 scripting engines for script evaluation, have been moved from the Debezium core module into a separate artifact (DBZ-2549). This artifact must be added to the plug-in directories of the connector(s) with which you wish to use these SMTs. When using the Debezium container image for Kafka Connect, set the environment variable ENABLE_DEBEZIUM_SCRIPTING to true to achieve the same. This change was made so that scripting functionality is exposed only in environments with an appropriately secured Kafka Connect configuration interface.
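
    A minimal, illustrative sketch of a filter SMT configuration under this new packaging is shown below; the transform alias and the Groovy condition are hypothetical examples.

        import java.util.Properties;

        public class FilterSmtConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                // Requires the separate scripting artifact in the connector plug-in directory,
                // or ENABLE_DEBEZIUM_SCRIPTING=true on the Debezium Kafka Connect container image.
                config.setProperty("transforms", "filter"); // arbitrary alias
                config.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
                config.setProperty("transforms.filter.language", "jsr223.groovy");
                // Hypothetical condition: forward only events whose after.customer_id is below 1000.
                config.setProperty("transforms.filter.condition", "value.after.customer_id < 1000");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }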

    New Features

    There are no new features in this release.

    Fixes

    There are no new fixes in this release.

    Other changes

    This release includes also other changes:

    • Document outbox event router SMT DBZ-2480

    • Unify representation of events - part two - update other connector doc DBZ-2501

    • Add annotations to support splitting files for downstream docs DBZ-2539

    • Prepare message filtering SMT doc for product release DBZ-2460

    • Prepare content-based router SMT doc for product release DBZ-2519

    Release 1.2.3.Final (September 8th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • JSON functions in MySQL grammar unsupported DBZ-2453

    Other changes

    This release includes also other changes:

    • CloudEvents remains TP but has avro support downstream DBZ-2245

    • Prepare DB2 connector doc for TP DBZ-2403

    • Adjust outbox extension to updated Quarkus semantics DBZ-2465

    • Doc tweaks required to automatically build Db2 content in downstream user guide DBZ-2500

    Release 1.2.2.Final (August 25th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Adding new table to cdc causes the sqlconnector to fail DBZ-2303

    • LSNs in replication slots are not monotonically increasing DBZ-2338

    • Transaction data loss when process restarted DBZ-2397

    • java.lang.NullPointerException in ByLogicalTableRouter.java DBZ-2412

    Other changes

    This release includes also other changes:

    • Refactor: Add domain type for LSN DBZ-2200

    • Miscellaneous small doc updates for the 1.2 release DBZ-2399

    • Update some doc file names DBZ-2402

    Release 1.2.1.Final (July 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Document content based routing and filtering for MongoDB DBZ-2255

    • Handle MariaDB syntax add column IF EXISTS as part of alter table DDL DBZ-2219

    • Add Apicurio converters to Connect container image DBZ-2083

    Fixes

    This release includes the following fixes:

    • MongoDB connector is not resilient to Mongo connection errors DBZ-2141

    • MySQL connector should filter additional DML binlog entries for RDS by default DBZ-2275

    • Concurrent access to a thread map DBZ-2278

    • Postgres connector may skip events during snapshot-streaming transition DBZ-2288

    • MySQL connector emits false error while missing required data DBZ-2301

    • io.debezium.engine.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy can’t be initiated due to NoSuchMethod error DBZ-2302

    • Allow single dimension DECIMAL in CAST DBZ-2305

    • MySQL JSON functions are missing from the grammar DBZ-2318

    • Description in documentation metrics tables is bold and shouldn’t be DBZ-2326

    • ALTER TABLE with timestamp default CURRENT_TIMESTAMP not null fails the task DBZ-2330

    Other changes

    This release includes also other changes:

    • Unstable tests in SQL Server connector DBZ-2217

    • Intermittent test failure on CI - SqlServerConnectorIT#verifyOffsets() DBZ-2220

    • Intermittent test failure on CI - MySQL DBZ-2229

    • Intermittent test failure on CI - SqlServerChangeTableSetIT#readHistoryAfterRestart() DBZ-2231

    • Failing test MySqlSourceTypeInSchemaIT.shouldPropagateSourceTypeAsSchemaParameter DBZ-2238

    • Intermittent test failure on CI - MySqlConnectorRegressionIT#shouldConsumeAllEventsFromDatabaseUsingBinlogAndNoSnapshot() DBZ-2243

    • Use upstream image in ApicurioRegistryTest DBZ-2256

    • Intermittent failure of MongoDbConnectorIT.shouldConsumeTransaction DBZ-2264

    • Intermittent test failure on CI - MySqlSourceTypeInSchemaIT#shouldPropagateSourceTypeByDatatype() DBZ-2269

    • Intermittent test failure on CI - MySqlConnectorIT#shouldNotParseQueryIfServerOptionDisabled DBZ-2270

    • Intermittent test failure on CI - RecordsStreamProducerIT#testEmptyChangesProducesHeartbeat DBZ-2271

    • Incorrect dependency from outbox to core module DBZ-2276

    • Slowness in FieldRenamesTest DBZ-2286

    • Create GitHub Action for verifying correct formatting DBZ-2287

    • Clarify expectations for replica identity and key-less tables DBZ-2307

    • Jenkins worker nodes must be logged in to Docker Hub DBZ-2312

    • Upgrade PostgreSQL driver to 42.2.14 DBZ-2317

    • Intermittent test failure on CI - PostgresConnectorIT#shouldOutputRecordsInCloudEventsFormat DBZ-2319

    • Intermittent test failure on CI - TablesWithoutPrimaryKeyIT#shouldProcessFromStreaming DBZ-2324

    • Intermittent test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent DBZ-2325

    • Intermittent test failure on CI - SnapshotIT#takeSnapshotWithOldStructAndStartStreaming DBZ-2331

    Release 1.2.0.Final (June 24th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Test failure due to superfluous schema change event emitted on connector start DBZ-2211

    • Intermittent test failures on CI DBZ-2232

    • Test SimpleSourceConnectorOutputTest.shouldGenerateExpected blocked DBZ-2241

    • CloudEventsConverter should use Apicurio converter for Avro DBZ-2250

    • Default value is not properly set for non-optional columns DBZ-2267

    Other changes

    This release includes also other changes:

    • Diff MySQL connector 0.10 and latest docs DBZ-1997

    • Remove redundant property in antora.yml DBZ-2223

    • Binary log client is not cleanly stopped in testsuite DBZ-2221

    • Intermittent test failure on CI - Postgres DBZ-2230

    • Build failure with Kafka 1.x DBZ-2240

    • Intermittent test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent() DBZ-2261

    • Test failure BinlogReaderIT#shouldFilterAllRecordsBasedOnDatabaseWhitelistFilter() DBZ-2262

    Release 1.2.0.CR2 (June 18th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.CR2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The Debezium Server distribution package has been moved to a different URL and has been renamed to conform to standard industry practices (DBZ-2212).

    New Features

    • DB2 connector documentation ambiguous regarding licensing DBZ-1835

    • Optimize SQLServer connector query DBZ-2120

    • Documentation for implementing StreamNameMapper DBZ-2163

    • Update architecture page DBZ-2096

    Fixes

    This release includes the following fixes:

    • Encountered error when snapshotting collection type column DBZ-2117

    • Missing dependencies for Debezium Server Pulsar sink DBZ-2201

    Other changes

    This release includes also other changes:

    • Tests Asserting No Open Transactions Failing DBZ-2176

    • General test harness for End-2-End Benchmarking DBZ-1812

    • Add tests for datatype.propagate.source.type for all connectors DBZ-1916

    • Productize CloudEvents support DBZ-2019

    • [Doc] Add Debezium Architecture to downstream documentation DBZ-2029

    • Transaction metadata documentation DBZ-2069

    • Inconsistent test failures DBZ-2177

    • Add Jandex plugin to Debezium Server connectors DBZ-2192

    • Ability to scale wait times in OCP test-suite DBZ-2194

    • CI doesn’t delete mongo and sql server projects on successful runs DBZ-2195

    • Document database history and web server port for Debezium Server DBZ-2198

    • Do not throw IndexOutOfBoundsException when no task configuration is available DBZ-2199

    • Upgrade Apicurio to 1.2.2.Final DBZ-2206

    • Intermitent test failures DBZ-2207

    • Increase Pulsar Server timeouts DBZ-2210

    • Drop distribution from Debezium Server artifact name DBZ-2214

    Release 1.2.0.CR1 (June 10th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The format of whitelist/blacklist filter expressions for the Oracle connector has changed: the database name is not to be given as part of these any longer (the reason being that each connector only ever is configured in the scope of exactly one database). Filters like ORCLPDB1.SOMESCHEMA.SOMETABLE must be adjusted to SOMESCHEMA.SOMETABLE. The same applies for configuration properties referencing specific table columns, such as column.propagate.source.type.

    The format of whitelist/blacklist filter expressions for the SQL Server connector has changed: the database name is not to be given as part of these any longer (the reason being that each connector only ever is configured in the scope of exactly one database). Filters like testDB.dbo.orders must be adjusted to dbo.orders. The old format still is supported, but should not be used any longer and will be de-supported in a future version. The same applies for configuration properties referencing specific table columns, such as column.propagate.source.type.

    New Features

    • Restrict the set of tables with a publication when using pgoutput DBZ-1813

    • Support configuring different encodings for binary source data DBZ-1814

    • Add API for not registering metrics MBean into the platform MBean server DBZ-2089

    • Unable to handle UDT data DBZ-2091

    • Improve SQL Server reconnect during shutdown and connection resets DBZ-2106

    • OpenShift tests for SQL Server connector before GA DBZ-2113

    • OpenShift tests for MongoDB Connector before GA DBZ-2114

    • Log begin/end of schema recovery on INFO level DBZ-2149

    • Allow outbox EventRouter to pass non-String based Keys DBZ-2152

    • Introduce API checks DBZ-2159

    • Bump mysql binlog version DBZ-2160

    • Postgresql - Allow for include.unknown.datatypes to return string instead of hash DBZ-1266

    • Consider Apicurio registry DBZ-1639

    • Debezium Server should support Google Cloud PubSub DBZ-2092

    • Sink adapter for Apache Pulsar DBZ-2112

    Fixes

    This release includes the following fixes:

    • Transaction opened by Debezium is left idle and never committed DBZ-2118

    • Don’t call markBatchFinished() in finally block DBZ-2124

    • kafka SSL passwords need to be added to the Sensitive Properties list DBZ-2125

    • Intermittent test failure on CI - SQL Server DBZ-2126

    • CREATE TABLE query is giving parsing exception DBZ-2130

    • Misc. Javadoc and docs fixes DBZ-2136

    • Avro schema doesn’t change if a column default value is dropped DBZ-2140

    • Multiple SETs not supported in trigger DBZ-2142

    • Don’t validate internal database.history.connector.* config parameters DBZ-2144

    • ANTLR parser doesn’t handle MariaDB syntax drop index IF EXISTS as part of alter table DDL DBZ-2151

    • Casting as INT causes a ParsingError DBZ-2153

    • Calling function UTC_TIMESTAMP without parenthesis causes a parsing error DBZ-2154

    • Could not find or load main class io.debezium.server.Main DBZ-2170

    • MongoDB connector snapshot NPE in case of document field named "op" DBZ-2116

    • Adapt to changed TX representation in oplog in Mongo 4.2 DBZ-2216

    • Intermittent test failure — Multiple admin clients with same id DBZ-2228

    Other changes

    This release includes also other changes:

    • Adding tests and doc updates around column masking and truncating DBZ-775

    • Refactor/use common configuration parameters DBZ-1657

    • Develop sizing recommendations, load tests etc. DBZ-1662

    • Add performance test for SMTs like filters DBZ-1929

    • Add banner to older doc versions about them being outdated DBZ-1951

    • SMT Documentation DBZ-2021

    • Instable integration test with Testcontainers DBZ-2033

    • Add test for schema history topic for Oracle connector DBZ-2056

    • Random test failures DBZ-2060

    • Set up CI jobs for JDK 14/15 DBZ-2065

    • Introduce Any type for server to seamlessly integrate with Debezium API DBZ-2104

    • Update AsciiDoc markup in doc files for downstream reuse DBZ-2105

    • Upgrade to Quarkus 1.5.0.Final DBZ-2119

    • Additional AsciiDoc markup updates needed in doc files for downstream reuse DBZ-2129

    • Refactor & Extend OpenShift test-suite tooling to prepare for MongoDB and SQL Server DBZ-2132

    • OpenShift tests are failing when waiting for Connect metrics to be exposed DBZ-2135

    • Support incubator build in product release jobs DBZ-2137

    • Rebase MySQL grammar on the latest upstream version DBZ-2143

    • Await coordinator shutdown in embedded engine DBZ-2150

    • More meaningful exception in case of replication slot conflict DBZ-2156

    • Intermittent test failure on CI - Postgres DBZ-2157

    • OpenShift pipeline uses incorrect projects for Mongo and Sql Server deployment DBZ-2164

    • Incorrect polling timeout in AbstractReader DBZ-2169

    Release 1.2.0.Beta2 (May 19th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The snapshot mode initial_schema_only was renamed schema_only for Db2 connector (DBZ-2051).

    The previously deprecated options operation.header and add.source.fields of the ExtractNewRecordState have been removed; please use add.headers and add.fields instead (DBZ-1828).

    When instantiating the Debezium container in integration tests with Testcontainers, the full image name must be given now, e.g. 1debezium/connect:1.2.0.Beta2`. This is to allow for using custom container images in tests, e.g. containing additional SMTs, converters or sink connectors (DBZ-2070).

    New Features

    • Add JDBC driver versions to docs DBZ-2031

    • Add a few more loggings for Cassandra Connector DBZ-2066

    • Provide ready-to-use standalone application based on the embedded engine DBZ-651

    • Add option to skip LSN timestamp queries DBZ-1988

    • Add option to logical topic router for controlling placement of table information DBZ-2034

    • Add headers and topic name into scripting transforms DBZ-2074

    • Filter and content-based router SMTs should be restrictable to certain topics DBZ-2024

    Fixes

    This release includes the following fixes:

    • Avro schema doesn’t change if a column default value changes from 'foo' to 'bar' DBZ-2061

    • DDL statement throws error if compression keyword contains backticks (``) DBZ-2062

    • Error and connector stops when DDL contains algorithm=instant DBZ-2067

    • Debezium Engine advanced record consuming example broken DBZ-2073

    • Unable to parse MySQL ALTER statement with named primary key DBZ-2080

    • Missing schema-serializer dependency for Avro DBZ-2082

    • TinyIntOneToBooleanConverter doesn’t seem to work with columns having a default value DBZ-2085

    Other changes

    This release includes also other changes:

    • Add ability to insert fields from op field in ExtractNewDocumentState DBZ-1791

    • Test with MySQL 8.0.20 DBZ-2041

    • Update debezium-examples/tutorial README docker-compose file is missing DBZ-2059

    • Skip tests that are no longer compatible with Kafka 1.x DBZ-2068

    • Remove additional Jackson dependencies as of AK 2.5 DBZ-2076

    • Make EventProcessingFailureHandlingIT resilient against timing issues DBZ-2078

    • Tar packages must use posix format DBZ-2088

    • Remove unused sourceInfo variable DBZ-2090

    Release 1.2.0.Beta1 (May 7th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    Field eventType was removed from Outbox router SMT (DBZ-2014).

    JDBC driver has been upgrade to the version to 42.2.12 (DBZ-2027). Due to changes in the driver behaviour it is necessary to keep Debezium and driver versions aligned.

    Debezium API now allows conversion to JSON and Avro types distinctly for key and value (DBZ-1970). To enable this feature it was necessary to modify the incubating Debezium API.

    New Features

    • Don’t try to database history topic if it exists already DBZ-1886

    • Deleted database history should be detected for all connectors DBZ-1923

    • Provide anchors to connector parameters DBZ-1933

    • move static methods TRUNCATE_COLUMN and MASK_COLUMN as attributes to RelationalDatabaseConnectorConfig DBZ-1972

    • Implement SKIPPED_OPERATIONS for mysql DBZ-1895

    • User facing schema history topic for SQL Server DBZ-1904

    • Multiline stack traces can be collapsed into a single log event DBZ-1913

    • Introduce column.whitelist for Postgres Connector DBZ-1962

    • Add support for Postgres time, timestamp array columns DBZ-1969

    • Add support for Postgres Json and Jsonb array columns DBZ-1990

    • Content-based topic routing based on scripting languages DBZ-2000

    • Support different converters for key/value in embedded engine DBZ-1970

    Fixes

    This release includes the following fixes:

    • bit varying column has value that is too large to be cast to a long DBZ-1949

    • PostgreSQL Sink connector with outbox event router and Avro uses wrong default io.confluent schema namespace DBZ-1963

    • Stop processing new commitlogs in cdc folder DBZ-1985

    • [Doc] Debezium User Guide should provide example of DB connector yaml and deployment instructions DBZ-2011

    • ExtractNewRecordState SMT spamming logs for heartbeat messages DBZ-2036

    • MySQL alias FLUSH TABLE not handled DBZ-2047

    • Embedded engine not compatible with Kafka 1.x DBZ-2054

    Other changes

    This release includes also other changes:

    • Blog post and demo about Debezium + Camel DBZ-1656

    • Refactor connector config code to share the configuration definition DBZ-1750

    • DB2 connector follow-up refactorings DBZ-1753

    • Oracle JDBC driver available in Maven Central DBZ-1878

    • Align snapshot/streaming semantics in MongoDB documentation DBZ-1901

    • Add MySQL 5.5 and 5.6 to test matrix. DBZ-1953

    • Upgrade to Quarkus to 1.4.1 release DBZ-1975

    • Version selector on releases page should show all versions DBZ-1979

    • Upgrade to Apache Kafka 2.5.0 and Confluent Platform 5.5.0 DBZ-1981

    • Fix broken link DBZ-1983

    • Update Outbox Quarkus extension yaml DBZ-1991

    • Allow for simplified property references in filter SMT with graal.js DBZ-1993

    • Avoid broken cross-book references in downstream docs DBZ-1999

    • Fix wrong attribute name in MongoDB connector DBZ-2006

    • Upgrade formatter and Impsort plugins DBZ-2007

    • Clarify support for non-primary key tables in PostgreSQL documentation DBZ-2010

    • Intermittent test failure on CI DBZ-2030

    • Cleanup Postgres TypeRegistry DBZ-2038

    • Upgrade to latest parent pom and checkstyle DBZ-2039

    • Reduce build output to avoid maximum log length problems on CI DBZ-2043

    • Postgres TypeRegistry makes one query per enum type at startup DBZ-2044

    • Remove obsolete metrics from downstream docs DBZ-1947

    Release 1.2.0.Alpha1 (April 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.1 and has been tested with version 2.4.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    For the SQL Server connector, the previously deprecated snapshot mode initial_schema_only has been removed. The mode schema_only should be used instead, providing the same behavior and semantics (DBZ-1945).

    The previously deprecated message transformations UnwrapFromEnvelope and UnwrapMongoDbEnvelope have been removed. Instead, please use ExtractNewRecordState and ExtractNewDocumentState, respectively (DBZ-1968).

    New Features

    • Expose original value for PK updates DBZ-1531

    • New column masking mode: consistent hashing DBZ-1692

    • Provide a filtering SMT DBZ-1782

    • Support converters for embedded engine DBZ-1807

    • Enhance MongoDB connector metrics DBZ-1859

    • SQL Server connector: support reconnect after the database connection is broken DBZ-1882

    • Support SMTs in embedded engine DBZ-1930

    • Snapshot metrics shows TotalNumberOfEventsSeen as zero DBZ-1932

    Fixes

    This release includes the following fixes:

    • java.lang.IllegalArgumentException: Timestamp format must be yyyy-mm-dd hh:mm:ss[.fffffffff] DBZ-1744

    • Snapshot lock timeout setting is not documented DBZ-1914

    • AvroRuntimeException when publishing transaction metadata DBZ-1915

    • Connector restart logic throttles for the first 2 seconds DBZ-1918

    • Wal2json empty change event could cause NPE above version 1.0.3.final DBZ-1922

    • Misleading error message on lost database connection DBZ-1926

    • Cassandra CDC should not move and delete processed commitLog file under testing mode DBZ-1927

    • Broken internal links and anchors in documentation DBZ-1935

    • Dokumentation files in modules create separate pages, should be partials instead DBZ-1944

    • Validation of binlog_row_image is not compatible with MySQL 5.5 DBZ-1950

    • High CPU usage when idle DBZ-1960

    • Outbox Quarkus Extension throws NPE in quarkus:dev mode DBZ-1966

    • Cassandra Connector: unable to deserialize column mutation with reversed type DBZ-1967

    Other changes

    This release includes also other changes:

    • Replace Custom CassandraTopicSelector with DBZ’s TopicSelector class in Cassandra Connector DBZ-1407

    • Improve documentation on WAL disk space usage for Postgres connector DBZ-1732

    • Outbox Quarkus Extension: Update version of extension used by demo DBZ-1786

    • Community newsletter 1/2020 DBZ-1806

    • Remove obsolete SnapshotChangeRecordEmitter DBZ-1898

    • Fix typo in Quarkus Outbox extension documentation DBZ-1902

    • Update schema change topic section of SQL Server connector doc DBZ-1903

    • Documentation should link to Apache Kafka upstream docs DBZ-1906

    • Log warning about insufficient retention time for DB history topic DBZ-1905

    • The error messaging around binlog configuration is missleading DBZ-1911

    • Restore documentation of MySQL event structures DBZ-1919

    • Link from monitoring page to connector-specific metrics DBZ-1920

    • Update snapshot.mode options in SQL Server documentation DBZ-1924

    • Update build and container images to Apache Kafka 2.4.1 DBZ-1925

    • Avoid Thread#sleep() calls in Oracle connector tests DBZ-1942

    • Different versions of Jackson components pulled in as dependencies DBZ-1943

    • Remove deprecated connector option value "initial_schema_only" DBZ-1945

    • Add docs for mask column and truncate column features DBZ-1954

    • Upgrade MongoDB driver to 3.12.3 DBZ-1958

    • Remove deprecated unwrap SMTs DBZ-1968

    \ No newline at end of file + Release Notes for Debezium 1.2

    Release Notes for Debezium 1.2

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.2.5.Final (September 24th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.
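
    The stop/reinstall/restart cycle described above can also be scripted when connectors are managed through the Kafka Connect REST interface. The sketch below is one possible approach rather than an official procedure: it assumes a Connect worker listening on localhost:8083, a connector named inventory-connector, and the original registration request stored in register-connector.json (all of these names are illustrative). Deleting and re-creating the connector under the same name acts as the graceful stop and restart, because the source offsets are kept in Kafka under the connector name and are picked up again on re-registration.

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;
        import java.nio.file.Files;
        import java.nio.file.Path;

        public class ConnectorUpgrade {
            public static void main(String[] args) throws Exception {
                HttpClient client = HttpClient.newHttpClient();
                String connectUrl = "http://localhost:8083";   // assumed Kafka Connect REST endpoint
                String name = "inventory-connector";           // hypothetical connector name

                // 1. Gracefully stop the running connector by deleting it; its offsets remain in Kafka.
                client.send(HttpRequest.newBuilder(URI.create(connectUrl + "/connectors/" + name))
                        .DELETE().build(), HttpResponse.BodyHandlers.ofString());

                // 2. Out of band: remove the old plugin files, install the 1.2.5.Final plugin files,
                //    and restart the Connect worker(s).

                // 3. Re-register the connector with the same configuration; it resumes from the stored offsets.
                String config = Files.readString(Path.of("register-connector.json"));
                HttpRequest create = HttpRequest.newBuilder(URI.create(connectUrl + "/connectors"))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(config))
                        .build();
                System.out.println(client.send(create, HttpResponse.BodyHandlers.ofString()).body());
            }
        }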

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Fix Quarkus datasource configuration for Quarkus 1.9 DBZ-2558

    Other changes

    This release also includes other changes:

    • Prepare revised SMT docs (filter and content-based routing) for downstream DBZ-2567

    • Swap closing square bracket for curly brace in downstream title annotations DBZ-2577

    Release 1.2.4.Final (September 17th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The SMTs for content-based routing and filtering (both of which use JSR 223 scripting engines for script evaluation) have been moved from the Debezium core module into a separate artifact (DBZ-2549). This artifact must be added to the plug-in directories of the connectors with which you wish to use these SMTs. When using the Debezium container image for Kafka Connect, set the environment variable ENABLE_DEBEZIUM_SCRIPTING to true to achieve the same. This change was made so that scripting functionality is only exposed in environments with an appropriately secured Kafka Connect configuration interface.
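
    For instance, an integration test that runs the Debezium Connect image with scripting enabled could set the variable as in the following Testcontainers sketch. The image tag, topic names, and Kafka address are illustrative assumptions, and a Kafka broker reachable from the container is required before the container is started.

        import org.testcontainers.containers.GenericContainer;

        public class ScriptingEnabledConnect {
            public static void main(String[] args) {
                // Sketch only: the usual debezium/connect environment variables plus ENABLE_DEBEZIUM_SCRIPTING.
                GenericContainer<?> connect = new GenericContainer<>("debezium/connect:1.2.4.Final")
                        .withEnv("ENABLE_DEBEZIUM_SCRIPTING", "true")
                        .withEnv("BOOTSTRAP_SERVERS", "kafka:9092")        // assumes a reachable Kafka broker
                        .withEnv("GROUP_ID", "1")
                        .withEnv("CONFIG_STORAGE_TOPIC", "connect_configs")
                        .withEnv("OFFSET_STORAGE_TOPIC", "connect_offsets")
                        .withEnv("STATUS_STORAGE_TOPIC", "connect_statuses")
                        .withExposedPorts(8083);
                // connect.start() once the container shares a network with that broker.
            }
        }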

    New Features

    There are no new features in this release.

    Fixes

    There are no new fixes in this release.

    Other changes

    This release also includes other changes:

    • Document outbox event router SMT DBZ-2480

    • Unify representation of events - part two - update other connector doc DBZ-2501

    • Add annotations to support splitting files for downstream docs DBZ-2539

    • Prepare message filtering SMT doc for product release DBZ-2460

    • Prepare content-based router SMT doc for product release DBZ-2519

    Release 1.2.3.Final (September 8th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • JSON functions in MySQL grammar unsupported DBZ-2453

    Other changes

    This release also includes other changes:

    • CloudEvents remains TP but has avro support downstream DBZ-2245

    • Prepare DB2 connector doc for TP DBZ-2403

    • Adjust outbox extension to updated Quarkus semantics DBZ-2465

    • Doc tweaks required to automatically build Db2 content in downstream user guide DBZ-2500

    Release 1.2.2.Final (August 25th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Adding new table to cdc causes the sqlconnector to fail DBZ-2303

    • LSNs in replication slots are not monotonically increasing DBZ-2338

    • Transaction data loss when process restarted DBZ-2397

    • java.lang.NullPointerException in ByLogicalTableRouter.java DBZ-2412

    Other changes

    This release also includes other changes:

    • Refactor: Add domain type for LSN DBZ-2200

    • Miscellaneous small doc updates for the 1.2 release DBZ-2399

    • Update some doc file names DBZ-2402

    Release 1.2.1.Final (July 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Document content based routing and filtering for MongoDB DBZ-2255

    • Handle MariaDB syntax add column IF EXISTS as part of alter table DDL DBZ-2219

    • Add Apicurio converters to Connect container image DBZ-2083

    Fixes

    This release includes the following fixes:

    • MongoDB connector is not resilient to Mongo connection errors DBZ-2141

    • MySQL connector should filter additional DML binlog entries for RDS by default DBZ-2275

    • Concurrent access to a thread map DBZ-2278

    • Postgres connector may skip events during snapshot-streaming transition DBZ-2288

    • MySQL connector emits a false error while missing required data DBZ-2301

    • io.debezium.engine.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy can’t be initiated due to NoSuchMethod error DBZ-2302

    • Allow single dimension DECIMAL in CAST DBZ-2305

    • MySQL JSON functions are missing from the grammar DBZ-2318

    • Description in documentation metrics tables is bold and shouldn’t be DBZ-2326

    • ALTER TABLE with timestamp default CURRENT_TIMESTAMP not null fails the task DBZ-2330

    Other changes

    This release also includes other changes:

    • Unstable tests in SQL Server connector DBZ-2217

    • Intermittent test failure on CI - SqlServerConnectorIT#verifyOffsets() DBZ-2220

    • Intermittent test failure on CI - MySQL DBZ-2229

    • Intermittent test failure on CI - SqlServerChangeTableSetIT#readHistoryAfterRestart() DBZ-2231

    • Failing test MySqlSourceTypeInSchemaIT.shouldPropagateSourceTypeAsSchemaParameter DBZ-2238

    • Intermittent test failure on CI - MySqlConnectorRegressionIT#shouldConsumeAllEventsFromDatabaseUsingBinlogAndNoSnapshot() DBZ-2243

    • Use upstream image in ApicurioRegistryTest DBZ-2256

    • Intermittent failure of MongoDbConnectorIT.shouldConsumeTransaction DBZ-2264

    • Intermittent test failure on CI - MySqlSourceTypeInSchemaIT#shouldPropagateSourceTypeByDatatype() DBZ-2269

    • Intermittent test failure on CI - MySqlConnectorIT#shouldNotParseQueryIfServerOptionDisabled DBZ-2270

    • Intermittent test failure on CI - RecordsStreamProducerIT#testEmptyChangesProducesHeartbeat DBZ-2271

    • Incorrect dependency from outbox to core module DBZ-2276

    • Slowness in FieldRenamesTest DBZ-2286

    • Create GitHub Action for verifying correct formatting DBZ-2287

    • Clarify expectations for replica identity and key-less tables DBZ-2307

    • Jenkins worker nodes must be logged in to Docker Hub DBZ-2312

    • Upgrade PostgreSQL driver to 42.2.14 DBZ-2317

    • Intermittent test failure on CI - PostgresConnectorIT#shouldOutputRecordsInCloudEventsFormat DBZ-2319

    • Intermittent test failure on CI - TablesWithoutPrimaryKeyIT#shouldProcessFromStreaming DBZ-2324

    • Intermittent test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent DBZ-2325

    • Intermittent test failure on CI - SnapshotIT#takeSnapshotWithOldStructAndStartStreaming DBZ-2331

    Release 1.2.0.Final (June 24th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    There are no new features in this release.

    Fixes

    This release includes the following fixes:

    • Test failure due to superfluous schema change event emitted on connector start DBZ-2211

    • Intermittent test failures on CI DBZ-2232

    • Test SimpleSourceConnectorOutputTest.shouldGenerateExpected blocked DBZ-2241

    • CloudEventsConverter should use Apicurio converter for Avro DBZ-2250

    • Default value is not properly set for non-optional columns DBZ-2267

    Other changes

    This release also includes other changes:

    • Diff MySQL connector 0.10 and latest docs DBZ-1997

    • Remove redundant property in antora.yml DBZ-2223

    • Binary log client is not cleanly stopped in testsuite DBZ-2221

    • Intermittent test failure on CI - Postgres DBZ-2230

    • Build failure with Kafka 1.x DBZ-2240

    • Intermittent test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent() DBZ-2261

    • Test failure BinlogReaderIT#shouldFilterAllRecordsBasedOnDatabaseWhitelistFilter() DBZ-2262

    Release 1.2.0.CR2 (June 18th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.CR2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The Debezium Server distribution package has been moved to a different URL and has been renamed to conform to standard industry practices (DBZ-2212).

    New Features

    • DB2 connector documentation ambiguous regarding licensing DBZ-1835

    • Optimize SQLServer connector query DBZ-2120

    • Documentation for implementing StreamNameMapper DBZ-2163

    • Update architecture page DBZ-2096

    Fixes

    This release includes the following fixes:

    • Encountered error when snapshotting collection type column DBZ-2117

    • Missing dependencies for Debezium Server Pulsar sink DBZ-2201

    Other changes

    This release also includes other changes:

    • Tests Asserting No Open Transactions Failing DBZ-2176

    • General test harness for End-2-End Benchmarking DBZ-1812

    • Add tests for datatype.propagate.source.type for all connectors DBZ-1916

    • Productize CloudEvents support DBZ-2019

    • [Doc] Add Debezium Architecture to downstream documentation DBZ-2029

    • Transaction metadata documentation DBZ-2069

    • Inconsistent test failures DBZ-2177

    • Add Jandex plugin to Debezium Server connectors DBZ-2192

    • Ability to scale wait times in OCP test-suite DBZ-2194

    • CI doesn’t delete mongo and sql server projects on successful runs DBZ-2195

    • Document database history and web server port for Debezium Server DBZ-2198

    • Do not throw IndexOutOfBoundsException when no task configuration is available DBZ-2199

    • Upgrade Apicurio to 1.2.2.Final DBZ-2206

    • Intermittent test failures DBZ-2207

    • Increase Pulsar Server timeouts DBZ-2210

    • Drop distribution from Debezium Server artifact name DBZ-2214

    Release 1.2.0.CR1 (June 10th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The format of whitelist/blacklist filter expressions for the Oracle connector has changed: the database name must no longer be given as part of them, since each connector is only ever configured in the scope of exactly one database. Filters like ORCLPDB1.SOMESCHEMA.SOMETABLE must be adjusted to SOMESCHEMA.SOMETABLE. The same applies to configuration properties referencing specific table columns, such as column.propagate.source.type.

    The format of whitelist/blacklist filter expressions for the SQL Server connector has changed: the database name must no longer be given as part of them, since each connector is only ever configured in the scope of exactly one database. Filters like testDB.dbo.orders must be adjusted to dbo.orders. The old format is still supported, but it is deprecated and will no longer be accepted in a future version. The same applies to configuration properties referencing specific table columns, such as column.propagate.source.type.
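
    For example, a SQL Server connector configuration using the new two-part form might look like the sketch below. The table and column names are purely illustrative; the Oracle connector follows the same pattern with schema.table filters.

        import java.util.Properties;

        public class SqlServerFilterConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
                config.setProperty("database.dbname", "testDB");
                // Old (deprecated) form: "testDB.dbo.orders"; the new form omits the database name:
                config.setProperty("table.whitelist", "dbo.orders");
                config.setProperty("column.propagate.source.type", "dbo.orders.order_number");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }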

    New Features

    • Restrict the set of tables with a publication when using pgoutput DBZ-1813

    • Support configuring different encodings for binary source data DBZ-1814

    • Add API for not registering metrics MBean into the platform MBean server DBZ-2089

    • Unable to handle UDT data DBZ-2091

    • Improve SQL Server reconnect during shutdown and connection resets DBZ-2106

    • OpenShift tests for SQL Server connector before GA DBZ-2113

    • OpenShift tests for MongoDB Connector before GA DBZ-2114

    • Log begin/end of schema recovery on INFO level DBZ-2149

    • Allow outbox EventRouter to pass non-String based Keys DBZ-2152

    • Introduce API checks DBZ-2159

    • Bump mysql binlog version DBZ-2160

    • Postgresql - Allow for include.unknown.datatypes to return string instead of hash DBZ-1266

    • Consider Apicurio registry DBZ-1639

    • Debezium Server should support Google Cloud PubSub DBZ-2092

    • Sink adapter for Apache Pulsar DBZ-2112

    Fixes

    This release includes the following fixes:

    • Transaction opened by Debezium is left idle and never committed DBZ-2118

    • Don’t call markBatchFinished() in finally block DBZ-2124

    • Kafka SSL passwords need to be added to the Sensitive Properties list DBZ-2125

    • Intermittent test failure on CI - SQL Server DBZ-2126

    • CREATE TABLE query is giving parsing exception DBZ-2130

    • Misc. Javadoc and docs fixes DBZ-2136

    • Avro schema doesn’t change if a column default value is dropped DBZ-2140

    • Multiple SETs not supported in trigger DBZ-2142

    • Don’t validate internal database.history.connector.* config parameters DBZ-2144

    • ANTLR parser doesn’t handle MariaDB syntax drop index IF EXISTS as part of alter table DDL DBZ-2151

    • Casting as INT causes a ParsingError DBZ-2153

    • Calling function UTC_TIMESTAMP without parenthesis causes a parsing error DBZ-2154

    • Could not find or load main class io.debezium.server.Main DBZ-2170

    • MongoDB connector snapshot NPE in case of document field named "op" DBZ-2116

    • Adapt to changed TX representation in oplog in Mongo 4.2 DBZ-2216

    • Intermittent test failure — Multiple admin clients with same id DBZ-2228

    Other changes

    This release also includes other changes:

    • Adding tests and doc updates around column masking and truncating DBZ-775

    • Refactor/use common configuration parameters DBZ-1657

    • Develop sizing recommendations, load tests etc. DBZ-1662

    • Add performance test for SMTs like filters DBZ-1929

    • Add banner to older doc versions about them being outdated DBZ-1951

    • SMT Documentation DBZ-2021

    • Unstable integration test with Testcontainers DBZ-2033

    • Add test for schema history topic for Oracle connector DBZ-2056

    • Random test failures DBZ-2060

    • Set up CI jobs for JDK 14/15 DBZ-2065

    • Introduce Any type for server to seamlessly integrate with Debezium API DBZ-2104

    • Update AsciiDoc markup in doc files for downstream reuse DBZ-2105

    • Upgrade to Quarkus 1.5.0.Final DBZ-2119

    • Additional AsciiDoc markup updates needed in doc files for downstream reuse DBZ-2129

    • Refactor & Extend OpenShift test-suite tooling to prepare for MongoDB and SQL Server DBZ-2132

    • OpenShift tests are failing when waiting for Connect metrics to be exposed DBZ-2135

    • Support incubator build in product release jobs DBZ-2137

    • Rebase MySQL grammar on the latest upstream version DBZ-2143

    • Await coordinator shutdown in embedded engine DBZ-2150

    • More meaningful exception in case of replication slot conflict DBZ-2156

    • Intermittent test failure on CI - Postgres DBZ-2157

    • OpenShift pipeline uses incorrect projects for Mongo and Sql Server deployment DBZ-2164

    • Incorrect polling timeout in AbstractReader DBZ-2169

    Release 1.2.0.Beta2 (May 19th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The snapshot mode initial_schema_only was renamed to schema_only for the Db2 connector (DBZ-2051).
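
    A Db2 connector registration that still used the old value would be updated as in this minimal sketch (connection settings and other required properties omitted; the connector class name io.debezium.connector.db2.Db2Connector is assumed):

        import java.util.Properties;

        public class Db2SnapshotModeConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.db2.Db2Connector");
                // "initial_schema_only" was the pre-1.2.0.Beta2 spelling; the renamed value is:
                config.setProperty("snapshot.mode", "schema_only");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }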

    The previously deprecated options operation.header and add.source.fields of the ExtractNewRecordState SMT have been removed; please use add.headers and add.fields instead (DBZ-1828).
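
    A configuration using the replacement options might look like the following sketch; the transform alias and the chosen header and field names are only examples.

        import java.util.Properties;

        public class UnwrapSmtOptions {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("transforms", "unwrap");
                config.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
                // Replacements for the removed options:
                //   operation.header  -> add.headers
                //   add.source.fields -> add.fields
                config.setProperty("transforms.unwrap.add.headers", "op");
                config.setProperty("transforms.unwrap.add.fields", "op,source.ts_ms");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }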

    When instantiating the Debezium container in integration tests with Testcontainers, the full image name must now be given, e.g. debezium/connect:1.2.0.Beta2. This is to allow for using custom container images in tests, e.g. images containing additional SMTs, converters or sink connectors (DBZ-2070).
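
    For example (a sketch assuming the DebeziumContainer class from the debezium-testing-testcontainers module; Kafka network wiring, startup, and connector registration are omitted):

        import io.debezium.testing.testcontainers.DebeziumContainer;

        public class ConnectContainerSketch {
            public static void main(String[] args) {
                // The full image name is now mandatory, which also allows pointing at a custom Connect image.
                try (DebeziumContainer connect = new DebeziumContainer("debezium/connect:1.2.0.Beta2")) {
                    // ... attach to a Kafka container network, start, and register connectors ...
                }
            }
        }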

    New Features

    • Add JDBC driver versions to docs DBZ-2031

    • Add a few more loggings for Cassandra Connector DBZ-2066

    • Provide ready-to-use standalone application based on the embedded engine DBZ-651

    • Add option to skip LSN timestamp queries DBZ-1988

    • Add option to logical topic router for controlling placement of table information DBZ-2034

    • Add headers and topic name into scripting transforms DBZ-2074

    • Filter and content-based router SMTs should be restrictable to certain topics DBZ-2024

    Fixes

    This release includes the following fixes:

    • Avro schema doesn’t change if a column default value changes from 'foo' to 'bar' DBZ-2061

    • DDL statement throws error if compression keyword contains backticks (``) DBZ-2062

    • Error and connector stops when DDL contains algorithm=instant DBZ-2067

    • Debezium Engine advanced record consuming example broken DBZ-2073

    • Unable to parse MySQL ALTER statement with named primary key DBZ-2080

    • Missing schema-serializer dependency for Avro DBZ-2082

    • TinyIntOneToBooleanConverter doesn’t seem to work with columns having a default value DBZ-2085

    Other changes

    This release also includes other changes:

    • Add ability to insert fields from op field in ExtractNewDocumentState DBZ-1791

    • Test with MySQL 8.0.20 DBZ-2041

    • Update debezium-examples/tutorial README: docker-compose file is missing DBZ-2059

    • Skip tests that are no longer compatible with Kafka 1.x DBZ-2068

    • Remove additional Jackson dependencies as of AK 2.5 DBZ-2076

    • Make EventProcessingFailureHandlingIT resilient against timing issues DBZ-2078

    • Tar packages must use posix format DBZ-2088

    • Remove unused sourceInfo variable DBZ-2090

    Release 1.2.0.Beta1 (May 7th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The eventType field was removed from the outbox event router SMT (DBZ-2014).

    The PostgreSQL JDBC driver has been upgraded to version 42.2.12 (DBZ-2027). Due to changes in the driver behaviour, it is necessary to keep the Debezium and driver versions aligned.

    The Debezium API now allows the key and the value to be converted to JSON and Avro types independently (DBZ-1970). Enabling this feature required a modification to the incubating Debezium API.
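
    A minimal sketch of the embedded engine with explicitly chosen key and value formats is shown below. It assumes the two-argument DebeziumEngine.create(keyFormat, valueFormat) overload introduced by this change (the single-argument form remains available), and all connection settings are illustrative.

        import java.util.Properties;
        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        public class KeyValueFormatsExample {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.setProperty("name", "engine");
                props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                props.setProperty("database.hostname", "localhost");
                props.setProperty("database.port", "5432");
                props.setProperty("database.user", "postgres");
                props.setProperty("database.password", "postgres");
                props.setProperty("database.dbname", "inventory");
                props.setProperty("database.server.name", "dbserver1");
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");

                // Key and value serialization formats are now chosen independently; here both are JSON.
                DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine
                        .create(Json.class, Json.class)
                        .using(props)
                        .notifying(record -> System.out.println(record.key() + " -> " + record.value()))
                        .build();

                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);   // engine.close() should be called on shutdown
            }
        }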

    New Features

    • Don’t try to create the database history topic if it exists already DBZ-1886

    • Deleted database history should be detected for all connectors DBZ-1923

    • Provide anchors to connector parameters DBZ-1933

    • Move static methods TRUNCATE_COLUMN and MASK_COLUMN as attributes to RelationalDatabaseConnectorConfig DBZ-1972

    • Implement SKIPPED_OPERATIONS for MySQL DBZ-1895

    • User facing schema history topic for SQL Server DBZ-1904

    • Multiline stack traces can be collapsed into a single log event DBZ-1913

    • Introduce column.whitelist for Postgres Connector DBZ-1962

    • Add support for Postgres time, timestamp array columns DBZ-1969

    • Add support for Postgres Json and Jsonb array columns DBZ-1990

    • Content-based topic routing based on scripting languages DBZ-2000

    • Support different converters for key/value in embedded engine DBZ-1970

    Fixes

    This release includes the following fixes:

    • bit varying column has value that is too large to be cast to a long DBZ-1949

    • PostgreSQL Sink connector with outbox event router and Avro uses wrong default io.confluent schema namespace DBZ-1963

    • Stop processing new commitlogs in cdc folder DBZ-1985

    • [Doc] Debezium User Guide should provide example of DB connector yaml and deployment instructions DBZ-2011

    • ExtractNewRecordState SMT spamming logs for heartbeat messages DBZ-2036

    • MySQL alias FLUSH TABLE not handled DBZ-2047

    • Embedded engine not compatible with Kafka 1.x DBZ-2054

    Other changes

    This release also includes other changes:

    • Blog post and demo about Debezium + Camel DBZ-1656

    • Refactor connector config code to share the configuration definition DBZ-1750

    • DB2 connector follow-up refactorings DBZ-1753

    • Oracle JDBC driver available in Maven Central DBZ-1878

    • Align snapshot/streaming semantics in MongoDB documentation DBZ-1901

    • Add MySQL 5.5 and 5.6 to the test matrix DBZ-1953

    • Upgrade to the Quarkus 1.4.1 release DBZ-1975

    • Version selector on releases page should show all versions DBZ-1979

    • Upgrade to Apache Kafka 2.5.0 and Confluent Platform 5.5.0 DBZ-1981

    • Fix broken link DBZ-1983

    • Update Outbox Quarkus extension yaml DBZ-1991

    • Allow for simplified property references in filter SMT with graal.js DBZ-1993

    • Avoid broken cross-book references in downstream docs DBZ-1999

    • Fix wrong attribute name in MongoDB connector DBZ-2006

    • Upgrade formatter and Impsort plugins DBZ-2007

    • Clarify support for non-primary key tables in PostgreSQL documentation DBZ-2010

    • Intermittent test failure on CI DBZ-2030

    • Cleanup Postgres TypeRegistry DBZ-2038

    • Upgrade to latest parent pom and checkstyle DBZ-2039

    • Reduce build output to avoid maximum log length problems on CI DBZ-2043

    • Postgres TypeRegistry makes one query per enum type at startup DBZ-2044

    • Remove obsolete metrics from downstream docs DBZ-1947

    Release 1.2.0.Alpha1 (April 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.4.1 and has been tested with version 2.4.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.2.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.2.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.2.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    For the SQL Server connector, the previously deprecated snapshot mode initial_schema_only has been removed. The mode schema_only should be used instead, providing the same behavior and semantics (DBZ-1945).

    The previously deprecated message transformations UnwrapFromEnvelope and UnwrapMongoDbEnvelope have been removed. Instead, please use ExtractNewRecordState and ExtractNewDocumentState, respectively (DBZ-1968).
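
    A connector configuration that still referenced the removed transformations would switch to the new classes as in this sketch; the transform alias is arbitrary, and the MongoDB class name is assumed to live in the io.debezium.connector.mongodb.transforms package.

        import java.util.Properties;

        public class ReplacementSmtConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("transforms", "unwrap");
                // Relational connectors: UnwrapFromEnvelope -> ExtractNewRecordState
                config.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
                // MongoDB connector: UnwrapMongoDbEnvelope -> ExtractNewDocumentState, e.g.
                // config.setProperty("transforms.unwrap.type",
                //         "io.debezium.connector.mongodb.transforms.ExtractNewDocumentState");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }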

    New Features

    • Expose original value for PK updates DBZ-1531

    • New column masking mode: consistent hashing DBZ-1692

    • Provide a filtering SMT DBZ-1782

    • Support converters for embedded engine DBZ-1807

    • Enhance MongoDB connector metrics DBZ-1859

    • SQL Server connector: support reconnect after the database connection is broken DBZ-1882

    • Support SMTs in embedded engine DBZ-1930

    • Snapshot metrics shows TotalNumberOfEventsSeen as zero DBZ-1932

    Fixes

    This release includes the following fixes:

    • java.lang.IllegalArgumentException: Timestamp format must be yyyy-mm-dd hh:mm:ss[.fffffffff] DBZ-1744

    • Snapshot lock timeout setting is not documented DBZ-1914

    • AvroRuntimeException when publishing transaction metadata DBZ-1915

    • Connector restart logic throttles for the first 2 seconds DBZ-1918

    • Wal2json empty change event could cause NPE above version 1.0.3.final DBZ-1922

    • Misleading error message on lost database connection DBZ-1926

    • Cassandra CDC should not move and delete processed commitLog file under testing mode DBZ-1927

    • Broken internal links and anchors in documentation DBZ-1935

    • Documentation files in modules create separate pages, should be partials instead DBZ-1944

    • Validation of binlog_row_image is not compatible with MySQL 5.5 DBZ-1950

    • High CPU usage when idle DBZ-1960

    • Outbox Quarkus Extension throws NPE in quarkus:dev mode DBZ-1966

    • Cassandra Connector: unable to deserialize column mutation with reversed type DBZ-1967

    Other changes

    This release also includes other changes:

    • Replace Custom CassandraTopicSelector with DBZ’s TopicSelector class in Cassandra Connector DBZ-1407

    • Improve documentation on WAL disk space usage for Postgres connector DBZ-1732

    • Outbox Quarkus Extension: Update version of extension used by demo DBZ-1786

    • Community newsletter 1/2020 DBZ-1806

    • Remove obsolete SnapshotChangeRecordEmitter DBZ-1898

    • Fix typo in Quarkus Outbox extension documentation DBZ-1902

    • Update schema change topic section of SQL Server connector doc DBZ-1903

    • Documentation should link to Apache Kafka upstream docs DBZ-1906

    • Log warning about insufficient retention time for DB history topic DBZ-1905

    • The error messaging around binlog configuration is misleading DBZ-1911

    • Restore documentation of MySQL event structures DBZ-1919

    • Link from monitoring page to connector-specific metrics DBZ-1920

    • Update snapshot.mode options in SQL Server documentation DBZ-1924

    • Update build and container images to Apache Kafka 2.4.1 DBZ-1925

    • Avoid Thread#sleep() calls in Oracle connector tests DBZ-1942

    • Different versions of Jackson components pulled in as dependencies DBZ-1943

    • Remove deprecated connector option value "initial_schema_only" DBZ-1945

    • Add docs for mask column and truncate column features DBZ-1954

    • Upgrade MongoDB driver to 3.12.3 DBZ-1958

    • Remove deprecated unwrap SMTs DBZ-1968

    \ No newline at end of file diff --git a/releases/1.3/index.html b/releases/1.3/index.html index d63920295c..4092d450b4 100644 --- a/releases/1.3/index.html +++ b/releases/1.3/index.html @@ -1 +1 @@ - Debezium Release Series 1.3

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.16
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 3.12.3
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.12
    Oracle Database: 11g, 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0
    Db2 Database: 11.5
    Driver: 11.5.0.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    1.3.0.Beta1

    2020-08-28
    \ No newline at end of file
    diff --git a/releases/1.3/release-notes.html b/releases/1.3/release-notes.html
    index acbbde262f..da6931ea6c 100644
    --- a/releases/1.3/release-notes.html
    +++ b/releases/1.3/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 1.3

    Release Notes for Debezium 1.3

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.3.1.Final (November 12th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Hide stacktrace when default value for SQL Server cannot be parsed DBZ-2642

    Fixes

    • Oracle throws "no snapshot found based on specified time" when running flashback query DBZ-1446

    • SQLExceptions when using Debezium with Oracle on RDS online logs and LogMiner DBZ-2624

    • Mining session stopped - task killed/SQL operation cancelled - Oracle LogMiner DBZ-2629

    • Antlr DDL parser fails to interpret BLOB([size]) DBZ-2641

    • WAL logs are not flushed in Postgres Connector DBZ-2653

    • Debezium server Event Hubs plugin support in v1.3 DBZ-2660

    • Should Allow NonAsciiCharacter in SQL DBZ-2670

    • MariaDB nextval function is not supported in grammar DBZ-2671

    • ChangeRecord information doesn’t connect with the TableSchema DBZ-2679

    • Sanitize field name does not sanitize sub-struct field DBZ-2680

    • Debezium fails if a non-existing view with the same name as existing table is dropped DBZ-2688

    • No viable alternative at input error on "min" column DBZ-2738

    Other changes

    • Upgrade MySQL JDBC driver to version 8.0.19 DBZ-2626

    Release 1.3.0.Final (October 1st, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Allow configurable CONNECT_LOG4J_LOGGERS in connect images DBZ-2541

    • MySQL connector - ignore statement-based logs DBZ-2583

    • Add a configuration which sanitizes values in mongodb DBZ-2585

    Fixes

    • SQL Server connector blocks CDC cleanup job DBZ-1285

    • Upgrade Guava library due to GuavaCompatibility errors DBZ-2008

    • mongodb-connector NPE in process of MongoDataConverter DBZ-2316

    • Error with UUID-typed collection column DBZ-2512

    • event.processing.failure.handling.mode doesn’t skip unparseable data events DBZ-2563

    • decoderbufs Segmentation fault on timestamp with infinity DBZ-2565

    • MongoDB ExtractNewDocumentState can not extract array of array DBZ-2569

    • New MySQL 8 ALTER USER password options not supported DBZ-2576

    • MariaDB ANTLR parser issue for grant syntax DBZ-2586

    • Debezium Db2 connector fails with tables using BOOLEAN type DBZ-2587

    • Db2 connector doesn’t allow reprocessing of messages DBZ-2591

    • Missing links in filter and content-based SMT doc DBZ-2593

    • Format error in doc for topic routing and event flattening SMTs DBZ-2596

    • Debezium refers to database instead of schema in Postgres config DBZ-2605

    • NullPointerException thrown when calling getAllTableIds DBZ-2607

    Other changes

    • Coordinate docs work for downstream 1.2 release DBZ-2272

    • Gracefully handle server-side filtered columns DBZ-2495

    • Schema change events fail to be dispatched due to inconsistent case DBZ-2555

    • Use dedicated functional interface for struct generators DBZ-2588

    • Remove obsolete note from docs DBZ-2590

    • Intermittent test failure on CI - ReplicationConnectionIT#shouldResumeFromLastReceivedLSN DBZ-2435

    • Intermittent test failure on CI - PostgresConnectorIT#shouldExecuteOnConnectStatements DBZ-2468

    • Intermittent test failure on CI - AbstractSqlServerDatatypesTest#stringTypes() DBZ-2474

    • Intermittent test failure on CI - PostgresConnectorIT#customSnapshotterSkipsTablesOnRestart() DBZ-2544

    • Intermittent test failure on CI - SQLServerConnectorIT#verifyOffsets DBZ-2599

    Release 1.3.0.CR1 (September 24th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    The SMTs for content-based routing and filtering – both using JSR 223 scripting engines for script evaluation – have been moved from the Debezium core module into a separate artifact (DBZ-2549). This artifact must be added to the plug-in directories of the connector(s) with which you wish to use these SMTs. When using the Debezium container image for Kafka Connect, set the environment variable ENABLE_DEBEZIUM_SCRIPTING to true to include it. This change was made so that scripting functionality is exposed only in environments with an appropriately secured Kafka Connect configuration interface.
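
    For illustration only: assuming the debezium-scripting artifact and a JSR 223 implementation such as Groovy have been added to the connector’s plug-in directory (or ENABLE_DEBEZIUM_SCRIPTING=true is set on the container image), a filter SMT could be wired into a connector configuration roughly as follows. The transform alias filter and the condition are invented example values, not something mandated by this release:

        transforms=filter
        transforms.filter.type=io.debezium.transforms.Filter
        transforms.filter.language=jsr223.groovy
        transforms.filter.condition=value.op != 'd'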

    New Features

    • Describe configuration options for auto-created change data topics DBZ-78

    Fixes

    • Outbox | Heartbeat not working when using ByteBufferConverter DBZ-2396

    • Catch up streaming before snapshot may duplicate messages upon resuming streaming DBZ-2550

    • Fix Quarkus datasource configuration for Quarkus 1.9 DBZ-2558

    Other changes

    • Show custom images instead of S2I in docs DBZ-2236

    • Add Db2 tests to OpenShift test-suite and CI DBZ-2383

    • Implement connection retry support for Oracle DBZ-2531

    • Extract scripting SMTs into a separate module with separate installation package DBZ-2549

    • Format updates in doc for topic routing and event flattening SMTs DBZ-2554

    • Coordinate docs work for downstream 1.3 release DBZ-2557

    • Extend connect image build script with ability to add extra libraries DBZ-2560

    • Invalid use of AppProtocol instead of protocol field in OpenShiftUtils service creation method DBZ-2562

    • Doc format updates for better downstream rendering DBZ-2564

    • Prepare revised SMT docs (filter and content-based routing) for downstream DBZ-2567

    • Swap closing square bracket for curly brace in downstream title annotations DBZ-2577

    Release 1.3.0.Beta2 (September 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    Certain configuration options (*.whitelist/*.blacklist) could be perceived as promoting racial stereotypes. These options now use a new naming convention (*.include.list/*.exclude.list) (DBZ-2462). This change is now implemented for all connectors. It is still possible to use the old names to simplify migration, but a warning is generated when the obsolete names are used.
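
    As a hedged sketch of the renaming (the property names are those of the MySQL connector; the database, table, and column values are invented for illustration), a configuration that previously used database.whitelist, table.whitelist, and column.blacklist would now read:

        database.include.list=inventory
        table.include.list=inventory.customers,inventory.orders
        column.exclude.list=inventory.customers.ssn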

    New Features

    • Ingest change data from Oracle databases using LogMiner DBZ-137

    • Server-side column filtering in SQL Server connector DBZ-1068

    • Introduce column.include.list for MySQL Connector DBZ-2508

    Fixes

    • Increase Maven version in enforcer plugin DBZ-2281

    • JSON functions in MySQL grammar unsupported DBZ-2453

    • PostgresStreamingChangeEventSource’s replicationStream flushLsn after closed DBZ-2461

    • Fix link rendering for include.list and exclude.list properties DBZ-2476

    • CVE-2019-10172 - security vulnerability DBZ-2509

    • ArrayIndexOutOfBoundsException with excluded column from CDC table DBZ-2522

    • maven-surefire-plugin versions defined twice in parent pom DBZ-2523

    • Connector Type properties are missing the displayName property DBZ-2526

    Other changes

    • Allow Postgres snapshotter to set streaming start position DBZ-2094

    • Ability to include Db2 driver in downstream image DBZ-2191

    • Unify representation of events in the documentation DBZ-2226

    • CloudEvents remains TP but has avro support downstream DBZ-2245

    • Document new SMTs: content-based-routing and filtering DBZ-2247

    • Document new Schema Change Topics DBZ-2248

    • Change db2 version in Dockerfile from latest DBZ-2257

    • Prepare DB2 connector doc for TP DBZ-2403

    • Strimzi cluster operator no longer exposes service to access prometheus metrics endpoint DBZ-2407

    • Clarify include/exclude filters for MongoDB are lists of regexps DBZ-2429

    • Mongo SMT does not support add.fields=patch DBZ-2455

    • Prepare message filtering SMT doc for product release DBZ-2460

    • Avoid divisive language in docs and option names in incubator connectors DBZ-2462

    • Intermittent test failure on CI - FieldRenamesIT DBZ-2464

    • Adjust outbox extension to updated Quarkus semantics DBZ-2465

    • Add a locking mode which doesn’t conflict with DML and existing reads on Percona Server DBZ-2466

    • Ignore SSL issues during release job DBZ-2467

    • [Documentation] Fix Debezium Server documentation for transformations and Google Pub/Sub DBZ-2469

    • Remove unnecessary include/exclude database configuration in order to ensure backwards compatibility in OCP test-suite DBZ-2470

    • Edit the features topic DBZ-2477

    • False negatives by commit message format checker DBZ-2479

    • Document outbox event router SMT DBZ-2480

    • Error when processing commitLogs related to frozen type collections DBZ-2498

    • Doc tweaks required to automatically build Db2 content in downstream user guide DBZ-2500

    • Unify representation of events - part two - update other connector doc DBZ-2501

    • Ability to specify kafka version for OCP ci job DBZ-2502

    • Add ability to configure prefix for the add.fields and add.headers DBZ-2504

    • Upgrade apicurio to 1.3.0.Final DBZ-2507

    • Add more logs to Cassandra Connector DBZ-2510

    • Create Configuration Fields for datatype.propagate.source.type and column.propagate.source.type DBZ-2516

    • Prepare content-based router SMT doc for product release DBZ-2519

    • Add missing ListOfRegex validator to all regex list fields and remove legacy whitelist/blacklist dependents DBZ-2527

    • Add annotations to support splitting files for downstream docs DBZ-2539

    Release 1.3.0.Beta1 (August 28th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    Certain configuration options (*.whitelist/*.blacklist) could be perceived as promoting racial stereotypes. These options now use a new naming convention (*.include.list/*.exclude.list) (DBZ-2171). This change is implemented for the MySQL, PostgreSQL, MongoDB, and SQL Server connectors in this release; the other connectors will follow in the next release. It is still possible to use the old names to simplify migration, but a warning is generated when the obsolete names are used.

    New Features

    • Improve error handling in Cassandra Connector DBZ-2410

    • Add support for a few MySql8 privileges DBZ-2413

    • Add support for MySql Dynamic Privileges DBZ-2415

    • Support for MySql8 invisible / visible index DBZ-2425

    • Hitting "Unable to unregister the MBean" when stopping an embedded engine DBZ-2427

    Fixes

    • Adding a new table to CDC causes the SQL Server connector to fail DBZ-2303

    • LSNs in replication slots are not monotonically increasing DBZ-2338

    • Transaction data loss when process restarted DBZ-2397

    • java.lang.NullPointerException in ByLogicalTableRouter.java DBZ-2412

    • Snapshot fails if table or schema contain hyphens DBZ-2452

    Other changes

    • Upgrade OpenShift guide DBZ-1908

    • Avoid divisive language in docs and option names in core connectors DBZ-2171

    • Refactor: Add domain type for LSN DBZ-2200

    • Entries in metrics tables should be linkable DBZ-2375

    • Update some doc file names DBZ-2402

    • Asciidoc throws warnings while building documentation DBZ-2408

    • Upgrade to Kafka 2.6.0 DBZ-2411

    • Confusing way of reporting incorrect DB credentials DBZ-2418

    • Default value for database port isn’t honoured DBZ-2423

    • Update to Quarkus 1.7.1.Final DBZ-2454

    Release 1.3.0.Alpha1 (August 6th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.5.0 and has been tested with version 2.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.3.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.3.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.3.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    For the sake of consistency with other Debezium configuration options, the option mongodb.poll.interval.sec of the Debezium MongoDB connector has been deprecated; please use the new option mongodb.poll.interval.ms instead. The deprecated option will be removed in a future Debezium release. (DBZ-2400)
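
    A minimal sketch of the switch for a MongoDB connector configuration, assuming an arbitrary 30-second polling interval (previously expressed as mongodb.poll.interval.sec=30):

        connector.class=io.debezium.connector.mongodb.MongoDbConnector
        mongodb.poll.interval.ms=30000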

    New Features

    • Postgres and possibly other DB connections are not properly shut down when the task encounters a thread interrupt DBZ-2133

    • More flexible connection options for MongoDB DBZ-2225

    • Sink adapter for Azure Event Hubs DBZ-2282

    • Implement new snapshot mode - initial_only DBZ-2379

    Fixes

    • Ignore non-existing table reported on Aurora via SHOW TABLES DBZ-1939

    • Cassandra connector not getting events DBZ-2086

    • PubSub Sink sends empty records DBZ-2277

    • Skipping LSN is inefficient and does not forward slot position DBZ-2310

    • Message size is at least 68x larger for changes with bit varying columns DBZ-2315

    • Change events lost when connector is restarted while processing transaction with PK update DBZ-2329

    • Error when processing commitLogs related to list-type columns DBZ-2345

    • Fix dependency groupId on Outbox Quarkus Extension documentation DBZ-2367

    • Cannot detect Azure Sql Version DBZ-2373

    • ParallelSnapshotReader sometimes throws NPE DBZ-2387

    Other changes

    • Column default values are not extracted while reading table structure DBZ-1491

    • DataException("Struct schemas do not match.") when recording cellData DBZ-2103

    • Provide container image for Debezium Server DBZ-2147

    • Update binlog client DBZ-2173

    • PostgreSQL test matrix runs incorrect test-suite DBZ-2279

    • Use ARG with defaults for Kafka’s versions and sha when building Kafka Docker image DBZ-2323

    • Modularize doc for PostgreSQL component DBZ-2333

    • Featured posts list broken DBZ-2374

    • Deprecate mongodb.poll.interval.sec and add mongodb.poll.interval.ms. DBZ-2400

    • Test failures on Kafka 1.x CI job DBZ-2332

    • Add configurable restart wait time and connection retries DBZ-2362

    • Support data types from other database engines DBZ-2365

    • Add ProtoBuf support for Debezium Server DBZ-2381

    • Intermittent test failure on CI - SqlServerChangeTableSetIT#addDefaultValue DBZ-2389

    • Intermittent test failure on CI - TablesWithoutPrimaryKeyIT#shouldProcessFromStreaming DBZ-2390

    • Include Azure PostgreSQL guidance in the docs DBZ-2394

    • Update JSON Snippet on MongoDB Docs Page DBZ-2395

    \ No newline at end of file
    diff --git a/releases/1.4/index.html b/releases/1.4/index.html
    index 255112cd2f..ba73cadfa2 100644
    --- a/releases/1.4/index.html
    +++ b/releases/1.4/index.html
    @@ -1 +1 @@
    - Debezium Release Series 1.4

    stable

    Tested Versions

    Java 8+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.19
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 3.12.3
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.12
    Oracle Database: 12c
    Driver: 12.2.0.1
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.5.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 8.0.x
    Driver: 7.0.0

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    \ No newline at end of file
    diff --git a/releases/1.4/release-notes.html b/releases/1.4/release-notes.html
    index 332db9aaae..cc1be28414 100644
    --- a/releases/1.4/release-notes.html
    +++ b/releases/1.4/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 1.4

    Release Notes for Debezium 1.4

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.4.2.Final (March 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    In earlier versions of Debezium, the MySQL connector incorrectly emitted snapshot events with the c (create) operation type instead of the correct type r (read). If you have consumers that rely on that earlier behavior, you can use the io.debezium.connector.mysql.transforms.ReadToInsertEvent single message transform to emulate it (DBZ-2788). The snapshot.events.as.inserts connector option, which had accidentally been introduced in 1.4.0 for the same purpose, has been removed; use the SMT instead in this situation. This SMT is meant for migration purposes only and will be removed in a future Debezium version.
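
    A minimal sketch of adding this SMT to a MySQL connector configuration; the transform alias snapshotasinsert is an arbitrary example name:

        transforms=snapshotasinsert
        transforms.snapshotasinsert.type=io.debezium.connector.mysql.transforms.ReadToInsertEvent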

    The (incubating) Debezium connector for Oracle emits transaction ids in lower-case now, differing from the previous behavior of returning them as upper-case (DBZ-3165).

    The previously deprecated snapshot mode INITIAL_SCHEMA_ONLY of the Oracle connector has been removed. Please use SCHEMA_ONLY instead (DBZ-3034).
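
    In connector configuration the snapshot mode is typically written in lower case; a hedged sketch of the affected Oracle connector setting after this change:

        connector.class=io.debezium.connector.oracle.OracleConnector
        snapshot.mode=schema_only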

    New Features

    • Make field descriptions consistent for time values (milliseconds, ms, sec, seconds, etc) DBZ-2858

    • Detect and skip non-parent index-organized tables DBZ-3036

    • Capture additional JMX metrics for LogMiner DBZ-3038

    • SqlServerConnector does not implement validate DBZ-3056

    • Improve DML parser performance DBZ-3078

    • Add ability to skip tests based on available database options DBZ-3110

    • Capture LogMiner session parameters when session fails to start DBZ-3153

    Fixes

    • Wrong reference to KafkaConnector in setting up Debezium DBZ-2745

    • Oracle connector (using LogMiner) with Oracle RDS (v12) does not capture changes DBZ-2754

    • Oracle connector causes ORA-65090 when connecting to an Oracle instance running in non-CDB mode DBZ-2795

    • Warnings and notifications from PostgreSQL are ignored by the connector until the connection is closed DBZ-2865

    • ExtractNewRecord SMT incorrectly extracts ts_ms from source info DBZ-2984

    • Replication terminates with ORA-01291: missing log file DBZ-3001

    • Support multiple schemas with Oracle LogMiner DBZ-3009

    • Documentation LOCK_TABLES should be LOCK TABLES DBZ-3013

    • Complete support for properties that contain hyphens DBZ-3019

    • SQLException for Global temp tables from OracleDatabaseMetaData.getIndexInfo() makes Debezium snapshotting fail DBZ-3057

    • no viable alternative at input 'create or replace index' DBZ-3067

    • Strange transaction metadata for Oracle logminer connector DBZ-3090

    • Environment Variables with spaces are truncated when written to properties file DBZ-3103

    • Error: Supplemental logging not configured for table. Use command: ALTER TABLE DBZ-3109

    • Final stage of snapshot analyzes tables not present in table.include.list thus stumbles upon unsupported XMLTYPE table DBZ-3151

    • Forever stuck with new binlog parser (1.3 and later) when processing big JSON column data DBZ-3168

    • XStream does not process NUMBER(1) data DBZ-3172

    • Replace MySQL connector option with SMT for mitigating wrong op flag DBZ-2788

    • DML parser IndexOutOfRangeException with where-clause using "IS NULL" DBZ-3193

    • ORA-01284 file cannot be opened error when file locked by another process DBZ-3194

    • CommitThroughput metrics can raise division by zero error DBZ-3200

    Other changes

    • OSD certification DBZ-2813

    • Integration with Service Registry promoted to GA DBZ-2815

    • Use new deployment endpoint for releases to Maven Central DBZ-3069

    • Remove zero-width whitespace from option names DBZ-3087

    • Remove duplicate anchor links in Connector properties DBZ-3111

    • Config validation for Oracle DBZ-3119

    • Clarify required privileges for using pgoutput DBZ-3138

    • Update Oracle documentation DBZ-3156

    • Put IIDR license requirement into NOTE box DBZ-3163

    • Remove COLUMN_BLACK_LIST option in Oracle connector DBZ-3167

    • Minor editorial update to PostgreSQL connector documentation DBZ-3192

    • Incorrect link/anchor pair for truncate.handling.mode property in PG properties documentation DBZ-3195

    Release 1.4.1.Final (January 28th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Clarify information in Debezium connector for SQL Server doc DBZ-2675

    • Add support for binary.handling.mode to the SQL Server connector DBZ-2912

    • Use collation to get charset when charset is not set DBZ-2922

    • Additional logging for number and type of sql operations DBZ-2980

    • Retry on "The server failed to resume the transaction" DBZ-2959

    Fixes

    • Debezium Connectors are failing while reading binlog: Unknown event type 100 DBZ-2499

    • Some column default values are not extracted correctly while reading table structure DBZ-2698

    • Supplemental logging is required for entire database rather than per monitored table DBZ-2711

    • Missing log file error when current SCN differs from snapshotted in Oracle connector and Logminer DBZ-2855

    • GitHub action for "Build Testing Workflow" is using old artifacts and not building missing dependencies DBZ-2861

    • Deadlock when the XStream handler and offset committer are invoked concurrently DBZ-2891

    • Sanitise DECIMAL string from VStream DBZ-2906

    • Vitess Connector download link missing on website DBZ-2907

    • DML statements longer than 4000 characters are incorrectly combined from V$LOGMNR_CONTENTS DBZ-2920

    • Default database charset is not recorded DBZ-2921

    • Unstable test: PostgresConnectorIT#testCustomSnapshotterSnapshotCompleteLifecycleHook() DBZ-2938

    • Snapshot causes ORA-08181 exception DBZ-2949

    • Postgres connector config validation fails because current connector is occupying replication slot DBZ-2952

    • Labeled create procedure’s body is not parsed DBZ-2972

    • Debezium swallows DML exception in certain cases DBZ-2981

    Other changes

    • Migrate website build to Hugo DBZ-575

    • Test binary/varbinary datatypes DBZ-2174

    • Implement Scn as a domain type DBZ-2518

    • Fix docs for message.key.columns and skipped.operations DBZ-2572

    • Upgrade to Apache Kafka Connect 2.6.1 DBZ-2630

    • Centralize postgres image name for test container tests DBZ-2764

    • Add missing connector options for Postgres connector DBZ-2807

    • Importing TestDatabase as QuarkusTestResource for IT tests DBZ-2868

    • Set up Pulsar via Testcontainers in PulsarIT DBZ-2915

    • Remove blacklist and whitelist from anchor link text in documentation DBZ-2918

    • Unstable test: PostgresShutdownIT#shouldStopOnPostgresFastShutdown() DBZ-2923

    • Rename whitelist/blacklist configs in examples to include/exclude DBZ-2925

    • Misspelling in readme for db2 connector DBZ-2940

    • Fetch correct Apicurio version for ApicurioRegistryTest DBZ-2945

    • Incorrect link IDs in SQL Server connector snapshot metrics table DBZ-2958

    Release 1.4.0.Final (January 7th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Improve error reporting from DDL parser DBZ-2366

    • Support TNS Names and full RAC connection strings DBZ-2859

    • Add more comprehensible logs to FIELD event DBZ-2873

    Fixes

    • AWS RDS has different role names which make connector validation fail DBZ-2800

    • Archive Log mining does not work with Logminer DBZ-2825

    • MySQL parser error for comments starting with tab DBZ-2840

    • Connector fails when using the '$' sign in a column name DBZ-2849

    • Connection adapter not passed to Surefire tests DBZ-2856

    • Unsupported MariaDB syntax for generated columns DBZ-2882

    • SLF4J API should not be included in Oracle distribution DBZ-2890

    • Vitess distro contains unaligned deps DBZ-2892

    • Changing base packages does not always trigger full builds DBZ-2896

    • LogMiner causes DataException when DATE field is specified as NOT NULL DBZ-2784

    Other changes

    • Remove LegacyDdlParser and related code DBZ-2167

    • Add MongoDB connector interface DBZ-2808

    • sanitize.field.names support for Vitess Connector DBZ-2851

    • Explicitly declare to Quarkus that ORM XML mapping is required for the outbox extension DBZ-2860

    • Upgrade MySQL JDBC driver to 8.0.21 DBZ-2887

    • Upgrade Guava library to 30.0 DBZ-2888

    • Avoid exception when payload id field not present DBZ-2889

    Release 1.4.0.CR1 (December 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Documentation of the Logminer implementation needs improvement DBZ-2799

    • Update Vitess Connector documentation DBZ-2854

    • Add Cassandra to tutorial Compose set-up DBZ-1463

    • Add support for Vitess gRPC static authentication DBZ-2852

    Fixes

    • Document "database.oracle.version" option DBZ-2603

    • Remove link in MySQL docs section that points to the same section DBZ-2710

    • Oracle schema history events fail on partitioned table DBZ-2841

    • outbox extension emits UPDATE events when delete is disabled DBZ-2847

    Other changes

    • Move Cassandra connector to separate repository DBZ-2636

    • Invalid column name should fail connector with meaningful message DBZ-2836

    • Fix typos in downstream ModuleID declarations in monitoring.adoc DBZ-2838

    • Duplicate anchor ID in partials/ref-connector-monitoring-snapshot-metrics.adoc DBZ-2839

    • Fix additional typo in ModuleID declaration in monitoring.adoc DBZ-2843

    • Edit modularization annotations in logging.adoc DBZ-2846

    • Update Groovy version to 3.0.7 DBZ-2850

    Release 1.4.0.Beta1 (December 9th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add support for distributed tracing DBZ-559

    • Outbox Quarkus extension: Support OpenTracing DBZ-1818

    • Upgrade MongoDB driver to 4.x to run in native mode in GraalVM (for Quarkus extension) DBZ-2138

• Allow snapshot records to be generated either as create or read for MySQL connector DBZ-2775

    • Support in Db2 connector for lowercase table and schema names DBZ-2796

• Option to kill the process when the engine run crashes DBZ-2785

    • Add support for using Vitess primary key as Kafka message key DBZ-2578

    • Add support for Nullable columns DBZ-2579

    • Tablespace name LOGMINER_TBS should not be hardcoded in the Java code DBZ-2797

    Fixes

    • DDL parser: Allow stored procedure variables in LIMIT clause DBZ-2692

• Wrong MySQL command in OpenShift deployment docs DBZ-2746

• Long-running transaction will be abandoned and ignored DBZ-2759

• MS SQL Decimal with default value not matching the scale of the column definition causes exception DBZ-2767

    • Cassandra Connector doesn’t shut down completely DBZ-2768

    • MySQL Parser fails for BINARY collation shortcut DBZ-2771

    • PostgresConnectorIT.shouldResumeStreamingFromSlotPositionForCustomSnapshot is failing for wal2json on CI DBZ-2772

    • Connector configuration property "database.out.server.name" is not relevant for Logminer implementation but cannot be omitted DBZ-2801

• CHARACTER VARYING MySQL identifier for VARCHAR is not supported in Debezium DBZ-2821

    • try-with-resources should not be used when OkHttp Response object is returned DBZ-2827

    • EmbeddedEngine does not shutdown when commitOffsets is interrupted DBZ-2830

    • Rename user command parsing fails DBZ-2743

    Other changes

    • Fix splitter annotations that control how content is modularized downstream DBZ-2824

    • VerifyRecord#isValid() compares JSON schema twice instead of Avro DBZ-735

    • Don’t rely on deprecated JSON serialization functionality of MongoDB driver DBZ-1322

    • Move website build to GitHub Actions DBZ-1984

    • Move Db2 connector to separate repository DBZ-2001

    • Modularize doc for SQL Server component DBZ-2335

    • Upgrade apicurio to 1.3.2.Final DBZ-2561

    • Remove obsolete logging files from /partials directory DBZ-2740

    • Remove obsolete monitoring files from /partials directory DBZ-2741

    • Increase Oracle CI frequency DBZ-2744

    • Make Debezium example work with Podman instead of Docker DBZ-2753

    • Disable log mining history by default DBZ-2763

• Upgrade setup-java action to the latest 1.4.3 DBZ-2770

    • Trigger non-core connector tests when core or DDL parser module are changed DBZ-2773

    • Add support for unsigned integer types DBZ-2776

    • Update JDK action workflow matrix with JDK 16.0.0-ea.24 DBZ-2777

    • Auto resolve latest JDK EA release number DBZ-2781

    • Update content in modularized SQL Server connector doc DBZ-2782

    Release 1.4.0.Alpha2 (November 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Move testcontainers changes on DebeziumContainer from UI PoC backend to Debezium main repo DBZ-2602

• Add ability to map new names for fields and headers DBZ-2606

    • Add close call to the Snapshotter interface DBZ-2608

    • Overriding Character Set Mapping DBZ-2673

    • Support PostgreSQL connector retry when database is restarted DBZ-2685

    • Cassandra connector documentation typos DBZ-2701

    • Fix typo in converters doc DBZ-2717

    • Add tests for DBZ-2617: PG connector does not enter FAILED state on failing heartbeats DBZ-2724

• Control ChangeEventQueue by the size in bytes DBZ-2662

    Fixes

• Oracle throws "no snapshot found based on specified time" when running flashback query DBZ-1446

    • Exception when PK definition precedes column definition DBZ-2580

    • Patroni can’t stop PostgreSQL when Debezium is streaming DBZ-2617

• ChangeRecord information doesn’t connect with the TableSchema DBZ-2679

    • MySQL connector fails on a zero date DBZ-2682

    • Oracle LogMiner doesn’t support partition tables DBZ-2683

    • DB2 doesn’t start reliably in OCP DBZ-2693

    • Dropped columns cause NPE in SqlServerConnector DBZ-2716

    • Timestamp default value in 'yyyy-mm-dd' format fails MySQL connector DBZ-2726

    • Connection timeout on write should retry DBZ-2727

    • No viable alternative at input error on "min" column DBZ-2738

    • SQLServer CI error in SqlServerConnectorIT.whenCaptureInstanceExcludesColumnsAndColumnsRenamedExpectNoErrors:1473 DBZ-2747

    • debezium-connector-db2: DB2 SQL Error: SQLCODE=-206 on DB2 for z/OS DBZ-2755

    • no viable alternative at input 'alter table order drop CONSTRAINT' DBZ-2760

• Tests are failing on macOS DBZ-2762

    Other changes

• Move CI to GitHub Actions for all repositories DBZ-1720

    • Privileges missing from setup in documentation - Oracle LogMiner connector DBZ-2628

    • Add validation that replication slot doesn’t exist DBZ-2637

    • Update OpenJDK Quality Outreach jobs DBZ-2638

    • Re-unify monitoring content in the operations/monitoring.adoc file DBZ-2659

• Pull Oracle-specific changes for reading table column metadata into debezium-core DBZ-2690

    • Intermittent test failure on CI - PostgresConnectorIT#shouldRegularlyFlushLsnWithTxMonitoring DBZ-2704

    • Topic routing doc formatting fix DBZ-2708

    • Re-unify logging content in the operations/logging.adoc file DBZ-2721

    • Incorporate Oracle LogMiner implementation updates DBZ-2729

    • Upgrade Vitess docker image to Vitess 8.0.0 DBZ-2749

    • Intermittent SQL Server test failure on CI - SqlServerConnectorIT DBZ-2625

    • Change initial.sync.max.threads to snapshot.max.threads DBZ-2742

    Release 1.4.0.Alpha1 (October 22nd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Allow to specify subset of captured tables to be snapshotted DBZ-2456

    • Implement snapshot select override behavior for MongoDB DBZ-2496

    • Asciidoc block titles are rendered the same as regular text DBZ-2631

    • Allow closing of hung JDBC connection DBZ-2632

    • Hide stacktrace when default value for SQL Server cannot be parsed DBZ-2642

    • Implement a CDC connector for Vitess DBZ-2463

    • SqlServer - Skip processing of LSNs not associated with change table entries. DBZ-2582

    Fixes

• Can’t override environment variables DBZ-2559

    • Inconsistencies in PostgreSQL Connector Docs DBZ-2584

    • ConcurrentModificationException during exporting data for a mongodb collection in a sharded cluster DBZ-2597

• MySQL connector didn’t pass the default DB charset to the column definition DBZ-2604

    • [Doc] "registry.redhat.io/amq7/amq-streams-kafka-25: unknown: Not Found" error occurs DBZ-2609

    • [Doc] "Error: no context directory and no Containerfile specified" error occurs DBZ-2610

• SQLExceptions using Debezium with Oracle on RDS online logs and LogMiner DBZ-2624

    • Mining session stopped - task killed/SQL operation cancelled - Oracle LogMiner DBZ-2629

    • Unparseable DDL: Using 'trigger' as table alias in view creation DBZ-2639

    • Antlr DDL parser fails to interpret BLOB([size]) DBZ-2641

    • MySQL Connector keeps stale offset metadata after snapshot.new.tables is changed DBZ-2643

    • WAL logs are not flushed in Postgres Connector DBZ-2653

    • Debezium server Event Hubs plugin support in v1.3 DBZ-2660

    • Cassandra Connector doesn’t use log4j for logging correctly DBZ-2661

• Should allow non-ASCII characters in SQL DBZ-2670

    • MariaDB nextval function is not supported in grammar DBZ-2671

• Sanitize field name does not sanitize sub-struct field DBZ-2680

    • Debezium fails if a non-existing view with the same name as existing table is dropped DBZ-2688

    Other changes

    • Merge MySQL doc source files into one again DBZ-2127

    • Metrics links duplicate anchor IDs DBZ-2497

    • Slim down Vitess container image DBZ-2551

• Modify release pipeline to support per-connector repos, e.g. Vitess DBZ-2611

    • Add Vitess connector to Kafka Connect container image DBZ-2618

    • User Guide Documentation corrections for PostgreSQL DBZ-2621

    • Checkstyle should be built as a part of GH check formatting action DBZ-2623

    • Upgrade MySQL JDBC driver to version 8.0.19 DBZ-2626

    • Add support for multiple shard GTIDs in VGTID DBZ-2635

    • Add documentation for Vitess connector DBZ-2645

    • Restrict matrix job configurations to run only on Slaves DBZ-2648

    • Upgrade JUnit to 4.13.1 DBZ-2658

    • Avoid parsing generated files in Checkstyle DBZ-2669

    • Update debezium/awestruct image to use Antora 2.3.4 DBZ-2674

    • Fix doc typos and minor format glitches for downstream rendering DBZ-2681

    • Intermittent test failure on CI - RecordsStreamProducerIT#shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() DBZ-2344

\ No newline at end of file
+ Release Notes for Debezium 1.4

    Release Notes for Debezium 1.4

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.4.2.Final (March 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.
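
For operators who manage connectors through the Kafka Connect REST interface, the steps above can be scripted. The sketch below is illustrative only: the worker URL, connector name, and registration payload are assumptions, while DELETE /connectors/{name} and POST /connectors are standard Kafka Connect REST endpoints.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class ConnectorUpgradeSketch {

        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            String worker = "http://localhost:8083";   // hypothetical Connect worker
            String name = "inventory-connector";       // hypothetical connector name

            // 1. Gracefully stop the running connector by removing it from the worker.
            client.send(HttpRequest.newBuilder(URI.create(worker + "/connectors/" + name))
                    .DELETE().build(), HttpResponse.BodyHandlers.ofString());

            // 2. Outside this program: remove the old plugin files, install the
            //    1.4.2.Final plugin files, and restart the Connect worker.

            // 3. Re-register the connector with the same configuration as before;
            //    it resumes from the offsets recorded by the previous version.
            String registration = "{\"name\":\"" + name + "\",\"config\":{}}"; // substitute the unchanged configuration for {}
            HttpResponse<String> response = client.send(
                    HttpRequest.newBuilder(URI.create(worker + "/connectors"))
                            .header("Content-Type", "application/json")
                            .POST(HttpRequest.BodyPublishers.ofString(registration))
                            .build(),
                    HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }

Step 2 is deliberately left outside the script: plugin files live on the Connect worker hosts and are replaced there before the worker is restarted.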

    Breaking changes

In earlier versions of Debezium, the MySQL connector incorrectly emitted snapshot events using the c (create) operation type instead of the correct type r (read). If you have consumers that rely on that earlier behavior, you can use the io.debezium.connector.mysql.transforms.ReadToInsertEvent single message transform (SMT) to emulate it (DBZ-2788). The connector option snapshot.events.as.inserts, which was accidentally introduced in 1.4.0 for the same purpose, has been removed again; use the SMT instead. The SMT is meant for migration purposes only and will be removed in a future Debezium version.
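
The transform is wired in like any other Kafka Connect SMT. The sketch below only assembles the relevant configuration entries; the connector class and the ReadToInsertEvent class come from the note above, while the SMT alias (snapshotasinsert) and the use of java.util.Properties are illustrative assumptions.

    import java.util.Properties;

    public class ReadToInsertEventConfigSketch {

        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            // ... database connection, server name, and topic settings stay unchanged ...

            // Re-map snapshot r (read) events back to c (create) events for legacy consumers.
            config.setProperty("transforms", "snapshotasinsert");
            config.setProperty("transforms.snapshotasinsert.type",
                    "io.debezium.connector.mysql.transforms.ReadToInsertEvent");

            config.forEach((key, value) -> System.out.println(key + "=" + value));
        }
    }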

The (incubating) Debezium connector for Oracle now emits transaction IDs in lower case, differing from the previous behavior of returning them in upper case (DBZ-3165).

    The previously deprecated snapshot mode INITIAL_SCHEMA_ONLY of the Oracle connector has been removed. Please use SCHEMA_ONLY instead (DBZ-3034).
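
For configurations that still reference the removed mode, the migration is a one-line change. The fragment below is a hypothetical Oracle connector configuration; only the snapshot.mode entry is the point of the example.

    import java.util.Properties;

    public class OracleSnapshotModeSketch {

        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty("connector.class", "io.debezium.connector.oracle.OracleConnector");
            // The removed INITIAL_SCHEMA_ONLY mode is replaced by SCHEMA_ONLY:
            config.setProperty("snapshot.mode", "schema_only");
            System.out.println(config);
        }
    }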

    New Features

    • Make field descriptions consistent for time values (milliseconds, ms, sec, seconds, etc) DBZ-2858

    • Detect and skip non-parent index-organized tables DBZ-3036

    • Capture additional JMX metrics for LogMiner DBZ-3038

    • SqlServerConnector does not implement validate DBZ-3056

    • Improve DML parser performance DBZ-3078

    • Add ability to skip tests based on available database options DBZ-3110

    • Capture LogMiner session parameters when session fails to start DBZ-3153

    Fixes

    • Wrong reference to KafkaConnector in setting up Debezium DBZ-2745

• Oracle connector (using LogMiner) with Oracle RDS (v12) does not capture changes DBZ-2754

    • Oracle connector causes ORA-65090 when connecting to an Oracle instance running in non-CDB mode DBZ-2795

    • Warnings and notifications from PostgreSQL are ignored by the connector until the connection is closed DBZ-2865

    • ExtractNewRecord SMT incorrectly extracts ts_ms from source info DBZ-2984

    • Replication terminates with ORA-01291: missing log file DBZ-3001

    • Support multiple schemas with Oracle LogMiner DBZ-3009

    • Documentation LOCK_TABLES should be LOCK TABLES DBZ-3013

    • Complete support for properties that contain hyphens DBZ-3019

    • SQLException for Global temp tables from OracleDatabaseMetaData.getIndexInfo() makes Debezium snapshotting fail DBZ-3057

    • no viable alternative at input 'create or replace index' DBZ-3067

    • Strange transaction metadata for Oracle logminer connector DBZ-3090

    • Environment Variables with spaces are truncated when written to properties file DBZ-3103

    • Error: Supplemental logging not configured for table. Use command: ALTER TABLE DBZ-3109

    • Final stage of snapshot analyzes tables not present in table.include.list thus stumbles upon unsupported XMLTYPE table DBZ-3151

    • Forever stuck with new binlog parser (1.3 and later) when processing big JSON column data DBZ-3168

• XStream does not process NUMBER(1) data DBZ-3172

    • Replace MySQL connector option with SMT for mitigating wrong op flag DBZ-2788

    • DML parser IndexOutOfRangeException with where-clause using "IS NULL" DBZ-3193

    • ORA-01284 file cannot be opened error when file locked by another process DBZ-3194

    • CommitThroughput metrics can raise division by zero error DBZ-3200

    Other changes

    • OSD certification DBZ-2813

    • Integration with Service Registry promoted to GA DBZ-2815

    • Use new deployment endpoint for releases to Maven Central DBZ-3069

    • Remove zero-width whitespace from option names DBZ-3087

    • Remove duplicate anchor links in Connector properties DBZ-3111

    • Config validation for Oracle DBZ-3119

    • Clarify required privileges for using pgoutput DBZ-3138

    • Update Oracle documentation DBZ-3156

    • Put IIDR license requirement into NOTE box DBZ-3163

    • Remove COLUMN_BLACK_LIST option in Oracle connector DBZ-3167

    • Minor editorial update to PostgreSQL connector documentation DBZ-3192

    • Incorrect link/anchor pair for truncate.handling.mode property in PG properties documentation DBZ-3195

    Release 1.4.1.Final (January 28th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Clarify information in Debezium connector for SQL Server doc DBZ-2675

    • Add support for binary.handling.mode to the SQL Server connector DBZ-2912

    • Use collation to get charset when charset is not set DBZ-2922

    • Additional logging for number and type of sql operations DBZ-2980

    • Retry on "The server failed to resume the transaction" DBZ-2959

    Fixes

    • Debezium Connectors are failing while reading binlog: Unknown event type 100 DBZ-2499

    • Some column default values are not extracted correctly while reading table structure DBZ-2698

    • Supplemental logging is required for entire database rather than per monitored table DBZ-2711

    • Missing log file error when current SCN differs from snapshotted in Oracle connector and Logminer DBZ-2855

    • GitHub action for "Build Testing Workflow" is using old artifacts and not building missing dependencies DBZ-2861

• Deadlock when the XStream handler and offset committer are called concurrently DBZ-2891

    • Sanitise DECIMAL string from VStream DBZ-2906

    • Vitess Connector download link missing on website DBZ-2907

    • DML statements longer than 4000 characters are incorrectly combined from V$LOGMNR_CONTENTS DBZ-2920

    • Default database charset is not recorded DBZ-2921

• Unstable test: PostgresConnectorIT#testCustomSnapshotterSnapshotCompleteLifecycleHook() DBZ-2938

    • Snapshot causes ORA-08181 exception DBZ-2949

    • Postgres connector config validation fails because current connector is occupying replication slot DBZ-2952

    • Labeled create procedure’s body is not parsed DBZ-2972

    • Debezium swallows DML exception in certain cases DBZ-2981

    Other changes

    • Migrate website build to Hugo DBZ-575

    • Test binary/varbinary datatypes DBZ-2174

    • Implement Scn as a domain type DBZ-2518

    • Fix docs for message.key.columns and skipped.operations DBZ-2572

    • Upgrade to Apache Kafka Connect 2.6.1 DBZ-2630

    • Centralize postgres image name for test container tests DBZ-2764

    • Add missing connector options for Postgres connector DBZ-2807

    • Importing TestDatabase as QuarkusTestResource for IT tests DBZ-2868

    • Set up Pulsar via Testcontainers in PulsarIT DBZ-2915

    • Remove blacklist and whitelist from anchor link text in documentation DBZ-2918

• Unstable test: PostgresShutdownIT#shouldStopOnPostgresFastShutdown() DBZ-2923

    • Rename whitelist/blacklist configs in examples to include/exclude DBZ-2925

    • Misspelling in readme for db2 connector DBZ-2940

    • Fetch correct Apicurio version for ApicurioRegistryTest DBZ-2945

    • Incorrect link IDs in SQL Server connector snapshot metrics table DBZ-2958

    Release 1.4.0.Final (January 7th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Improve error reporting from DDL parser DBZ-2366

    • Support TNS Names and full RAC connection strings DBZ-2859

    • Add more comprehensible logs to FIELD event DBZ-2873

    Fixes

    • AWS RDS has different role names which make connector validation fail DBZ-2800

    • Archive Log mining does not work with Logminer DBZ-2825

    • MySQL parser error for comments starting with tab DBZ-2840

    • Connector fails when using '$' sign in column name. DBZ-2849

    • Connection adapter not passed to Surefire tests DBZ-2856

    • Unsupported MariaDB syntax for generated columns DBZ-2882

• SLF4J API should not be included in Oracle distribution DBZ-2890

    • Vitess distro contains unaligned deps DBZ-2892

    • Changing base packages does not always trigger full builds DBZ-2896

    • LogMiner causes DataException when DATE field is specified as NOT NULL DBZ-2784

    Other changes

    • Remove LegacyDdlParser and related code DBZ-2167

    • Add MongoDB connector interface DBZ-2808

    • sanitize.field.names support for Vitess Connector DBZ-2851

    • Explicitly declare to Quarkus that ORM XML mapping is required for the outbox extension DBZ-2860

    • Upgrade MySQL JDBC driver to 8.0.21 DBZ-2887

    • Upgrade Guava library to 30.0 DBZ-2888

    • Avoid exception when payload id field not present DBZ-2889

    Release 1.4.0.CR1 (December 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Documentation of the Logminer implementation needs improvement DBZ-2799

    • Update Vitess Connector documentation DBZ-2854

    • Add Cassandra to tutorial Compose set-up DBZ-1463

    • Add support for Vitess gRPC static authentication DBZ-2852

    Fixes

    • Document "database.oracle.version" option DBZ-2603

    • Remove link in MySQL docs section that points to the same section DBZ-2710

    • Oracle schema history events fail on partitioned table DBZ-2841

    • outbox extension emits UPDATE events when delete is disabled DBZ-2847

    Other changes

    • Move Cassandra connector to separate repository DBZ-2636

    • Invalid column name should fail connector with meaningful message DBZ-2836

    • Fix typos in downstream ModuleID declarations in monitoring.adoc DBZ-2838

    • Duplicate anchor ID in partials/ref-connector-monitoring-snapshot-metrics.adoc DBZ-2839

    • Fix additional typo in ModuleID declaration in monitoring.adoc DBZ-2843

    • Edit modularization annotations in logging.adoc DBZ-2846

    • Update Groovy version to 3.0.7 DBZ-2850

    Release 1.4.0.Beta1 (December 9th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Add support for distributed tracing DBZ-559

    • Outbox Quarkus extension: Support OpenTracing DBZ-1818

    • Upgrade MongoDB driver to 4.x to run in native mode in GraalVM (for Quarkus extension) DBZ-2138

• Allow snapshot records to be generated either as create or read for MySQL connector DBZ-2775

    • Support in Db2 connector for lowercase table and schema names DBZ-2796

• Option to kill the process when the engine run crashes DBZ-2785

    • Add support for using Vitess primary key as Kafka message key DBZ-2578

    • Add support for Nullable columns DBZ-2579

    • Tablespace name LOGMINER_TBS should not be hardcoded in the Java code DBZ-2797

    Fixes

    • DDL parser: Allow stored procedure variables in LIMIT clause DBZ-2692

• Wrong MySQL command in OpenShift deployment docs DBZ-2746

• Long-running transaction will be abandoned and ignored DBZ-2759

• MS SQL Decimal with default value not matching the scale of the column definition causes exception DBZ-2767

    • Cassandra Connector doesn’t shut down completely DBZ-2768

    • MySQL Parser fails for BINARY collation shortcut DBZ-2771

    • PostgresConnectorIT.shouldResumeStreamingFromSlotPositionForCustomSnapshot is failing for wal2json on CI DBZ-2772

    • Connector configuration property "database.out.server.name" is not relevant for Logminer implementation but cannot be omitted DBZ-2801

• CHARACTER VARYING MySQL identifier for VARCHAR is not supported in Debezium DBZ-2821

    • try-with-resources should not be used when OkHttp Response object is returned DBZ-2827

    • EmbeddedEngine does not shutdown when commitOffsets is interrupted DBZ-2830

    • Rename user command parsing fails DBZ-2743

    Other changes

    • Fix splitter annotations that control how content is modularized downstream DBZ-2824

    • VerifyRecord#isValid() compares JSON schema twice instead of Avro DBZ-735

    • Don’t rely on deprecated JSON serialization functionality of MongoDB driver DBZ-1322

    • Move website build to GitHub Actions DBZ-1984

    • Move Db2 connector to separate repository DBZ-2001

    • Modularize doc for SQL Server component DBZ-2335

    • Upgrade apicurio to 1.3.2.Final DBZ-2561

    • Remove obsolete logging files from /partials directory DBZ-2740

    • Remove obsolete monitoring files from /partials directory DBZ-2741

    • Increase Oracle CI frequency DBZ-2744

    • Make Debezium example work with Podman instead of Docker DBZ-2753

    • Disable log mining history by default DBZ-2763

• Upgrade setup-java action to the latest 1.4.3 DBZ-2770

    • Trigger non-core connector tests when core or DDL parser module are changed DBZ-2773

    • Add support for unsigned integer types DBZ-2776

    • Update JDK action workflow matrix with JDK 16.0.0-ea.24 DBZ-2777

    • Auto resolve latest JDK EA release number DBZ-2781

    • Update content in modularized SQL Server connector doc DBZ-2782

    Release 1.4.0.Alpha2 (November 16th, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Move testcontainers changes on DebeziumContainer from UI PoC backend to Debezium main repo DBZ-2602

• Add ability to map new names for fields and headers DBZ-2606

    • Add close call to the Snapshotter interface DBZ-2608

    • Overriding Character Set Mapping DBZ-2673

    • Support PostgreSQL connector retry when database is restarted DBZ-2685

    • Cassandra connector documentation typos DBZ-2701

    • Fix typo in converters doc DBZ-2717

    • Add tests for DBZ-2617: PG connector does not enter FAILED state on failing heartbeats DBZ-2724

• Control ChangeEventQueue by the size in bytes DBZ-2662

    Fixes

• Oracle throws "no snapshot found based on specified time" when running flashback query DBZ-1446

    • Exception when PK definition precedes column definition DBZ-2580

    • Patroni can’t stop PostgreSQL when Debezium is streaming DBZ-2617

• ChangeRecord information doesn’t connect with the TableSchema DBZ-2679

    • MySQL connector fails on a zero date DBZ-2682

    • Oracle LogMiner doesn’t support partition tables DBZ-2683

    • DB2 doesn’t start reliably in OCP DBZ-2693

    • Dropped columns cause NPE in SqlServerConnector DBZ-2716

    • Timestamp default value in 'yyyy-mm-dd' format fails MySQL connector DBZ-2726

    • Connection timeout on write should retry DBZ-2727

    • No viable alternative at input error on "min" column DBZ-2738

    • SQLServer CI error in SqlServerConnectorIT.whenCaptureInstanceExcludesColumnsAndColumnsRenamedExpectNoErrors:1473 DBZ-2747

    • debezium-connector-db2: DB2 SQL Error: SQLCODE=-206 on DB2 for z/OS DBZ-2755

    • no viable alternative at input 'alter table order drop CONSTRAINT' DBZ-2760

• Tests are failing on macOS DBZ-2762

    Other changes

• Move CI to GitHub Actions for all repositories DBZ-1720

    • Privileges missing from setup in documentation - Oracle LogMiner connector DBZ-2628

    • Add validation that replication slot doesn’t exist DBZ-2637

    • Update OpenJDK Quality Outreach jobs DBZ-2638

    • Re-unify monitoring content in the operations/monitoring.adoc file DBZ-2659

• Pull Oracle-specific changes for reading table column metadata into debezium-core DBZ-2690

    • Intermittent test failure on CI - PostgresConnectorIT#shouldRegularlyFlushLsnWithTxMonitoring DBZ-2704

    • Topic routing doc formatting fix DBZ-2708

    • Re-unify logging content in the operations/logging.adoc file DBZ-2721

    • Incorporate Oracle LogMiner implementation updates DBZ-2729

    • Upgrade Vitess docker image to Vitess 8.0.0 DBZ-2749

    • Intermittent SQL Server test failure on CI - SqlServerConnectorIT DBZ-2625

    • Change initial.sync.max.threads to snapshot.max.threads DBZ-2742

    Release 1.4.0.Alpha1 (October 22nd, 2020)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.0 and has been tested with version 2.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.4.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.4.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.4.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Allow to specify subset of captured tables to be snapshotted DBZ-2456

    • Implement snapshot select override behavior for MongoDB DBZ-2496

    • Asciidoc block titles are rendered the same as regular text DBZ-2631

    • Allow closing of hung JDBC connection DBZ-2632

    • Hide stacktrace when default value for SQL Server cannot be parsed DBZ-2642

    • Implement a CDC connector for Vitess DBZ-2463

    • SqlServer - Skip processing of LSNs not associated with change table entries. DBZ-2582

    Fixes

• Can’t override environment variables DBZ-2559

    • Inconsistencies in PostgreSQL Connector Docs DBZ-2584

    • ConcurrentModificationException during exporting data for a mongodb collection in a sharded cluster DBZ-2597

• MySQL connector didn’t pass the default DB charset to the column definition DBZ-2604

    • [Doc] "registry.redhat.io/amq7/amq-streams-kafka-25: unknown: Not Found" error occurs DBZ-2609

    • [Doc] "Error: no context directory and no Containerfile specified" error occurs DBZ-2610

• SQLExceptions using Debezium with Oracle on RDS online logs and LogMiner DBZ-2624

    • Mining session stopped - task killed/SQL operation cancelled - Oracle LogMiner DBZ-2629

    • Unparseable DDL: Using 'trigger' as table alias in view creation DBZ-2639

    • Antlr DDL parser fails to interpret BLOB([size]) DBZ-2641

    • MySQL Connector keeps stale offset metadata after snapshot.new.tables is changed DBZ-2643

    • WAL logs are not flushed in Postgres Connector DBZ-2653

    • Debezium server Event Hubs plugin support in v1.3 DBZ-2660

    • Cassandra Connector doesn’t use log4j for logging correctly DBZ-2661

• Should allow non-ASCII characters in SQL DBZ-2670

    • MariaDB nextval function is not supported in grammar DBZ-2671

• Sanitize field name does not sanitize sub-struct field DBZ-2680

    • Debezium fails if a non-existing view with the same name as existing table is dropped DBZ-2688

    Other changes

    • Merge MySQL doc source files into one again DBZ-2127

    • Metrics links duplicate anchor IDs DBZ-2497

    • Slim down Vitess container image DBZ-2551

• Modify release pipeline to support per-connector repos, e.g. Vitess DBZ-2611

    • Add Vitess connector to Kafka Connect container image DBZ-2618

    • User Guide Documentation corrections for PostgreSQL DBZ-2621

    • Checkstyle should be built as a part of GH check formatting action DBZ-2623

    • Upgrade MySQL JDBC driver to version 8.0.19 DBZ-2626

    • Add support for multiple shard GTIDs in VGTID DBZ-2635

    • Add documentation for Vitess connector DBZ-2645

    • Restrict matrix job configurations to run only on Slaves DBZ-2648

    • Upgrade JUnit to 4.13.1 DBZ-2658

    • Avoid parsing generated files in Checkstyle DBZ-2669

    • Update debezium/awestruct image to use Antora 2.3.4 DBZ-2674

    • Fix doc typos and minor format glitches for downstream rendering DBZ-2681

    • Intermittent test failure on CI - RecordsStreamProducerIT#shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() DBZ-2344

\ No newline at end of file
diff --git a/releases/1.5/index.html b/releases/1.5/index.html
index b57b2b2098..cdd8da27cc 100644
--- a/releases/1.5/index.html
+++ b/releases/1.5/index.html
@@ -1 +1 @@
- Debezium Release Series 1.5

    stable

    Tested Versions

Java: 8+
Kafka Connect: 1.x, 2.x
MySQL: Database 5.7, 8.0.x; Driver 8.0.21
MongoDB: Database 3.2, 3.4, 3.6, 4.0, 4.2; Driver 4.1.1
PostgreSQL: Database 9.6, 10, 11, 12; Driver 42.2.14
Oracle: Database 12c, 19c; Driver 12.2.0.1, 19.8.0.0
SQL Server: Database 2017, 2019; Driver 7.2.2.jre8
Cassandra: Database 3.11.4; Driver 3.5.0
Db2: Database 11.5; Driver 11.5.0.0
Vitess: Database 8.0.x*, 9.0.x; Driver 9.0.0
* See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

\ No newline at end of file
+ Debezium Release Series 1.5

    stable

    Tested Versions

Java: 8+
Kafka Connect: 1.x, 2.x
MySQL: Database 5.7, 8.0.x; Driver 8.0.21
MongoDB: Database 3.2, 3.4, 3.6, 4.0, 4.2; Driver 4.1.1
PostgreSQL: Database 9.6, 10, 11, 12; Driver 42.2.14
Oracle: Database 12c, 19c; Driver 12.2.0.1, 19.8.0.0
SQL Server: Database 2017, 2019; Driver 7.2.2.jre8
Cassandra: Database 3.11.4; Driver 3.5.0
Db2: Database 11.5; Driver 11.5.0.0
Vitess: Database 8.0.x*, 9.0.x; Driver 9.0.0
* See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

\ No newline at end of file
diff --git a/releases/1.5/release-notes.html b/releases/1.5/release-notes.html
index cab086d40d..d3d8b065ea 100644
--- a/releases/1.5/release-notes.html
+++ b/releases/1.5/release-notes.html
@@ -1 +1 @@
- Release Notes for Debezium 1.5

    Release Notes for Debezium 1.5

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.5.4.Final (July 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Schema change events of excluded databases are discarded DBZ-3622

    • Oracle DDL parser fails on CREATE TABLE: mismatched input 'maxtrans' expecting {'AS', ';'} DBZ-3641

    Other changes

    • Database history properties missing in connector docs DBZ-3459

    • Update deprecated config for debezium smt DBZ-3673

    • Prepare test-suite for Kafka on RHEL DBZ-3566

    Release 1.5.3.Final (June 17th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Oracle connector does not correctly handle partially committed transactions DBZ-3322

    • Broken links in User guide table of routing SMT configuration options DBZ-3410

    • Broken link to basic configuration example in downstream content-based routing topic DBZ-3412

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Timezone difference not considered in LagFromSourceInMilliseconds calculation DBZ-3456

    • MySQL metrics documentation refers to legacy implementation DBZ-3572

    • Update downstream MySQL doc to reference streaming metrics vs. binlog metrics DBZ-3582

    • Transaction commit event dispatch fails if no active transaction in progress. DBZ-3593

    • GRANT/REVOKE for roles is not working DBZ-3610

    • DDL ParsingException - "SUPPLEMENTAL LOG DATA (UNIQUE INDEX) COLUMNS" DBZ-3619

    Other changes

    • Modularize doc for MongoDB component DBZ-2334

    • Docs clarification around tombstone events DBZ-3416

    • Update external link to AMQ Streams documentation DBZ-3502

    • Formatting updates to correct errors in documentation builds DBZ-3518

    Release 1.5.2.Final (May 28th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Release 1.5.1.Final requires JDK >= 11 DBZ-3574

    Other changes

    • Update external links in downstream docs to AMQ Streams deployment information DBZ-3525

    Release 1.5.1.Final (May 27th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Retry logic for "No more data to read from socket" is too strict DBZ-3472

    Fixes

    • io.debezium.text.ParsingException: no viable alternative at input 'IDNUMBER(4)GENERATEDBY' DBZ-1721

• Oracle LogMiner cannot add duplicate logfile DBZ-3266

    • First online log query does not limit results to those that are available. DBZ-3332

    • Connector crashing after running for some time DBZ-3377

    • An exception in resolveOracleDatabaseVersion if system language is not English DBZ-3397

    • Rename table stores only a fragment of DDL in schema history DBZ-3399

    • Broken link in downstream Monitoring chapter 7.3 DBZ-3409

    • Broken link in content-based routing chapter to page for downloading the SMT scripting archive DBZ-3411

    • LogMinerDmlParser mishandles double single quotes in WHERE clauses DBZ-3413

    • Incorrectly formatted links in downstream automatic topic creation doc DBZ-3414

    • SMT acronym incorrectly expanded in Debezium User Guide DBZ-3415

• Debezium mapped diagnostic contexts don’t work DBZ-3438

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Debezium MySQL connector does not process tables with partitions DBZ-3468

    • "Found null value for non-optional schema" error when issuing TRUNCATE from Postgres on a table with a PK DBZ-3469

    • Connector crashes when table name contains '-' character DBZ-3485

    • MySQL8 GRANT statement not parsable DBZ-3499

    • ReadToInsertEvent SMT needs to set ConfigDef DBZ-3508

    • SQLServer low throughput tables increase usage of TempDB DBZ-3515

    • Oracle redo log switch not detected when using multiple archiver process threads DBZ-3516

    • Missing schema function in DDL Parser DBZ-3543

    • DDL ParsingException "mismatched input 'sharing'" for create table syntax. DBZ-3549

    Other changes

    • User Guide corrections for SQL Server connector DBZ-3297

    • User Guide corrections for Db2 connector DBZ-3298

    • User Guide corrections for MySQL connector DBZ-3299

    • User Guide corrections for MongoDB connector DBZ-3300

    • Scope RHEL support for Debezium DBZ-3354

    • Reword prereq in downstream SQL Server connector doc DBZ-3392

    • Duplicate entry in MySQL connector properties table for mysql-property-skipped-operations DBZ-3402

    • Upgrade binlog client DBZ-3463

    • Backport documentation fixes to 1.5 DBZ-3532

    Release 1.5.0.Final (April 7th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Add support for Redis Streams target in Debezium Server DBZ-2879

    • Provide LSN coordinates as standardized sequence field DBZ-2911

    Fixes

    • Do not mine Data Guard archive log entries DBZ-3341

    • Debezium stuck in an infinite loop on boot DBZ-3343

    • Schema change SourceRecords have null partition DBZ-3347

    • LogMiner can incorrectly resolve that SCN is available DBZ-3348

    • The event.deserialization.failure.handling.mode is documented incorrectly DBZ-3353

    • DB2 Function wrong DBZ-3362

    • LogMiner parser incorrectly parses UNISTR function DBZ-3367

    • Invalid Decimal schema: scale parameter not found DBZ-3371

    Other changes

    • Allow Debezium Server to be used with Apicurio converters DBZ-2388

    • Remove connector properties from descriptors on the /connector-types response DBZ-3316

    • Literal attribute rendered in deployment instructions for the downstream PostgreSQL connector DBZ-3338

    • Fix test failures due to existing database object artifacts DBZ-3344

    • Use correct repository level PAT for building debezium website DBZ-3345

    • Document configuration of max.request.size DBZ-3355

    • Use Java 8 for Cassandra workflow DBZ-3357

    • Trigger workflow on workflow definition update DBZ-3358

    • Prefer DDL before logical schema in history recovery DBZ-3361

    • Add missing space and omitted command to PostgreSQL connector doc DBZ-3372

    • Wrong badge on Docker Hub DBZ-3383

    Release 1.5.0.CR1 (March 24th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, do not forget to pull them fresh from the Docker registry.

    Breaking changes

The Oracle connector was promoted from incubation to stable state (DBZ-3290). As a result, the following changes were included to prevent future breaking changes:

• the configuration option database.oracle.version has been removed

• the LogMiner-specific metrics have been incorporated into the streaming metrics

• the scn and commit_scn fields in the source info block are no longer LONG but STRING, to support very large SCN values (DBZ-2994); see the sketch after this list
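
For a downstream consumer written against the Kafka Connect API, the change means the SCN values are now read as strings. The sketch below is illustrative; the record handling is an assumption, while Struct#getString and the source block are standard parts of the Kafka Connect and Debezium event structure.

    import org.apache.kafka.connect.data.Struct;
    import org.apache.kafka.connect.source.SourceRecord;

    public class ScnFieldSketch {

        // Assumes "record" is a change event produced by the Oracle connector.
        static void printScns(SourceRecord record) {
            Struct value = (Struct) record.value();
            Struct source = value.getStruct("source");

            // Previously read as 64-bit integers; now strings, so SCN values larger
            // than Long.MAX_VALUE can be represented without overflow.
            String scn = source.getString("scn");
            String commitScn = source.getString("commit_scn");

            System.out.println("scn=" + scn + ", commit_scn=" + commitScn);
        }
    }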

    New Features

    • Upgrade to Apache Kafka 2.7.0 DBZ-2872

    • Add more parameters to TLS support DBZ-3262

    Fixes

    • Debezium logs "is not a valid Avro schema name" can be too verbose DBZ-2511

    • message.key.columns Regex Validation Time Complexity DBZ-2957

    • OID values don’t fit to INT32 schema DBZ-3033

    • Connector automatically restart on ORA-26653 DBZ-3236

    • UI container has no assets (JS artifacts, fonts, etc) and randomly fails building DBZ-3247

    • Revert Clob behavior for Oracle LogMiner to avoid null values DBZ-3257

    • SQL Server misses description for decimal.handling.mode DBZ-3267

    • Oracle connector ignores time.precision.mode and just uses adaptive mode DBZ-3268

    • commons-logging JAR is missing from Debezium Server distro DBZ-3277

    • MongoDB timeouts crash the whole connector DBZ-3278

    • Prefer archive logs over redo logs of the same SCN range DBZ-3292

    • LogMiner mining query may unintentionally skip records DBZ-3295

    • IndexOutOfBoundsException when LogMiner DML update statement contains a function as last column’s value DBZ-3305

    • Out of memory with mysql snapshots (regression of DBZ-94) DBZ-3309

    • Keyword ORDER is a valid identifier in MySQL grammar DBZ-3310

    • DDL statement couldn’t be parsed for ROW_FORMAT=TOKUDB_QUICKLZ DBZ-3311

    • LogMiner can miss a log switch event if too many switches occur. DBZ-3319

    • Function MOD is missing from MySQL grammar DBZ-3333

    • Incorrect SR label names in OCP testsuite DBZ-3336

    • DB2 upstream tests are still using master as the default branch DBZ-3337

    Other changes

    • Demo: Exploring non-key joins of Kafka Streams 2.4 DBZ-2100

    • Publish Debezium BOM POM DBZ-2145

    • Use BigInteger as SCN rather than BigDecimal DBZ-2457

    • Document ChangeConsumer usage for Debezium Engine DBZ-2520

    • Add check that target release is set DBZ-2536

    • Consolidate multiple JMX beans during Oracle streaming with LogMiner DBZ-2537

    • Create script for listing all contributors of a release DBZ-2592

    • Explicitly mention Debezium Engine database history config for different connectors DBZ-2665

    • Cleanup by restructuring Debezium UI REST API structure DBZ-3031

    • Make Debezium main repo build checks artifacts for CI/CD checks in sibling repositories available on Maven Central DBZ-3142

    • Handle duplicate warnings for deprecated options DBZ-3218

    • Upgrade Jackson as per AK 2.7 DBZ-3221

    • Document the need of qualified names in snapshot.include.collection.list DBZ-3244

    • Add snapshot.select.statement.override options to Oracle documentation DBZ-3250

    • Remove all possible backend calls from non-validation mode DBZ-3255

    • Document delayed TX END markers DBZ-3261

    • Extended scripting SMT docs with handling of non-data events DBZ-3269

    • Unify column inclusion/exclusion handling DBZ-3271

    • Downstream conditional spans topic boundary in db2 doc DBZ-3272

    • Add info about language dependencies into scripting SMTs DBZ-3280

    • Copyright check script should take additional connector repos into consideration DBZ-3281

    • Intermittent failure of MyMetricsIT.testStreamingOnlyMetrics DBZ-3304

    • Remove references to supported configurations from Db2 connector documentation DBZ-3308

    • Use separate API calls to get the connector info(name, id etc) and details(Properties) DBZ-3314

    • Documentation updates should trigger a website build DBZ-3320

    • Cassandra connector is not part of core CI build DBZ-3335

    Release 1.5.0.Beta2 (March 12th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The Oracle connector emits NUMBER(1) columns as int8 now by default. To emit them as boolean instead, use the io.debezium.connector.oracle.converters.NumberOneToBooleanConverter as described in the connector documentation (DBZ-3208).

    The Debezium connector for Oracle now uses the LogMiner-based capturing implementation by default. In order to use the XStream-based implementation, set the connector option database.connection.adapter to xstream (DBZ-3241).
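
    For illustration, a hedged sketch showing both options inside an Oracle connector registration. Every connection value is a placeholder, and the converters/boolean.type property pair follows Debezium's custom-converter convention, so verify the exact property names against the connector documentation.

        # Sketch: register an Oracle connector that maps NUMBER(1) to boolean and
        # explicitly opts back into the XStream adapter. All values are placeholders.
        import requests

        connector = {
            "name": "oracle-connector",                      # hypothetical name
            "config": {
                "connector.class": "io.debezium.connector.oracle.OracleConnector",
                "database.hostname": "oracle.example.com",   # placeholder
                "database.port": "1521",
                "database.user": "c##dbzuser",               # placeholder
                "database.password": "******",
                "database.dbname": "ORCLCDB",                # placeholder
                "database.server.name": "server1",
                "database.history.kafka.bootstrap.servers": "kafka:9092",
                "database.history.kafka.topic": "schema-changes.oracle",
                # Emit NUMBER(1) columns as boolean instead of the new int8 default.
                "converters": "boolean",
                "boolean.type": "io.debezium.connector.oracle.converters.NumberOneToBooleanConverter",
                # LogMiner is now the default; opt back into XStream explicitly.
                "database.connection.adapter": "xstream",
            },
        }
        requests.post("http://localhost:8083/connectors", json=connector).raise_for_status()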

    New Features

    • Detect and skip non-parent index-organized tables DBZ-3036

    • Capture additional JMX metrics for LogMiner DBZ-3038

    • Incorrect information in Debezium connector for Postgres documentation DBZ-3197

    • Add support for SET column type DBZ-3199

    • Improve relocation logic for processed commitLog files DBZ-3224

    • Disable log.mining.transaction.retention.hours logic by default DBZ-3242

    • Provide a signalling table DBZ-3141

    • Update sensitive env vars for connect-base image DBZ-3223

    • Support specifying kinesis endpoint in debezium server DBZ-3246

    • Add log4j.properties file DBZ-3248

    Fixes

    • Error in LSN DBZ-2417

    • Connector restarts with an SCN that was previously processed. DBZ-2875

    • Misleading error message for filtered publication with misconfigured filters DBZ-2885

    • There are still important problems with Oracle LogMiner DBZ-2976

    • Don’t execute initial statements upon connector validation DBZ-3030

    • Forever stuck with new binlog parser (1.3 and later) when processing big JSON column data DBZ-3106

    • Change Events are not captured after initial load DBZ-3128

    • Repeating Unknown schema error even after recent schema_recovery DBZ-3146

    • CloudEvent value id field is not unique DBZ-3157

    • Oracle connector fails when using database.tablename.case.insensitive=true DBZ-3190

    • DML parser IndexOutOfRangeException with where-clause using "IS NULL" DBZ-3193

    • ORA-01284 file cannot be opened error when file locked by another process DBZ-3194

    • CommitThroughput metrics can raise division by zero error DBZ-3200

    • LogMiner does not process NUMBER(1) data DBZ-3208

    • Update MongoDB driver version DBZ-3212

    • Extra connectors are not buildable unless main Debezium is built locally DBZ-3213

    • Docker image debezium/server:1.5 won’t start DBZ-3217

    • Debezium Oracle Connector not excluding table columns DBZ-3219

    • LogMiner parse failure with Update DML with no where condition DBZ-3235

    • Debezium 1.4.2.Final and onwards unable to parse sasl.jaas.config from env var DBZ-3245

    • Debezium engine should call stop on task even when start fails DBZ-3251

    • No meaningful message provided when oracle driver is missing DBZ-3254

    Other changes

    • Discuss capture job configuration as a tuning option for SQL Server and Db2 DBZ-2122

    • Prepare customizing auto-created topics doc for downstream DBZ-2654

    • Wrong warning about deprecated options DBZ-3084

    • Have non-validating mode in the UI DBZ-3088

    • Move container image builds to GH Actions DBZ-3131

    • Exclude CommonConnectorConfig.PROVIDE_TRANSACTION_METADATA from connectors not supporting it DBZ-3132

    • Add example for Debezium UI to debezium-examples repo DBZ-3134

    • Clarify required privileges for using pgoutput DBZ-3138

    • Do not rely on Max SCN seed value w/LogMiner DBZ-3145

    • Postgres documentation improvements DBZ-3149

    • Support running Oracle test suite in non-CDB (no PDB name) mode DBZ-3154

    • Update Oracle documentation DBZ-3156

    • Move the Oracle connector to the main repository DBZ-3166

    • Minor editorial update to PostgreSQL connector documentation DBZ-3192

    • Incorrect link/anchor pair for truncate.handling.mode property in PG properties documentation DBZ-3195

    • Update oracle-vagrant-box DBZ-3206

    • Update Oracle versions tested DBZ-3215

    • Oracle test suite does not always clean-up tables after tests DBZ-3237

    • Update Oracle tutorial example DBZ-3239

    • Use LogMiner adapter by default for Oracle connector DBZ-3241

    • Avoid reference to upstream Docker set-up DBZ-3259

    Release 1.5.0.Beta1 (February 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    A regression in the binlog client used by Debezium was identified where large JSON documents in a MySQL JSON column cause a severe performance degradation (DBZ-3106). This issue is under active discussion with the maintainer of the binlog client library.

    In earlier versions of Debezium, the MySQL connector incorrectly emitted snapshot events using the c (create) operation type instead of the correct type r (read). If you have consumers that rely on that earlier behavior, you can use the io.debezium.connector.mysql.transforms.ReadToInsertEvent single message transform to emulate it (DBZ-2788). The connector option snapshot.events.as.inserts, which was accidentally introduced in 1.4.0 for the same purpose, has been removed; use the SMT instead. The SMT is meant for migration purposes only and will be removed in a future Debezium version.
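
    For illustration, a minimal sketch of wiring that SMT into a MySQL connector configuration; the transform alias snapshotasinsert is an arbitrary choice, and only the transform-related keys are shown.

        # Sketch: configuration fragment enabling the ReadToInsertEvent SMT.
        smt_fragment = {
            "transforms": "snapshotasinsert",
            "transforms.snapshotasinsert.type":
                "io.debezium.connector.mysql.transforms.ReadToInsertEvent",
        }

        # Merge into an existing connector configuration before registering it, e.g.
        # connector["config"].update(smt_fragment)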

    The (incubating) Debezium connector for Oracle emits transaction ids in lower-case now, differing from the previous behavior of returning them as upper-case (DBZ-3165).

    The previously deprecated snapshot mode INITIAL_SCHEMA_ONLY of the Oracle connector has been removed. Please use SCHEMA_ONLY instead (DBZ-3034).
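
    As a small configuration sketch of that change (the lower-case spellings are the usual form of these values in connector configuration; verify them against the Oracle connector documentation):

        # Sketch: replacing the removed snapshot mode with its successor.
        old_fragment = {"snapshot.mode": "initial_schema_only"}   # no longer accepted
        new_fragment = {"snapshot.mode": "schema_only"}           # use this instead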

    New Features

    • Make field descriptions consistent for time values (milliseconds, ms, sec, seconds, etc) DBZ-2858

    • DebeziumEngine RecordChangeEvents cannot be modified DBZ-2897

    • Add license headers and related checkstyle checks for Debezium UI files DBZ-2985

    • Display commit SHA of UI frontend/backend somewhere in the footer DBZ-3052

    • Implement UX suggestions for display of connector type DBZ-3054

    • SqlServerConnector does not implement validate DBZ-3056

    • Database History Producer does not close with a timeout DBZ-3075

    • Improve DML parser performance DBZ-3078

    • Connector list table UI improvement desktop/mobile DBZ-3079

    • Vitess Connector adds support for Vitess 9.0.0 GA DBZ-3100

    • Improve layout for Column Truncate - Mask Component DBZ-3101

    • Improve layout for Data options component and main wizard nav DBZ-3105

    • Add ability to skip tests based on available database options DBZ-3110

    • Support for Transaction Metadata in MySql connector DBZ-3114

    • Add support for JSON column type DBZ-3115

    • Add support for ENUM column type DBZ-3124

    • Enable easy downloading of Camel Kafka Connectors DBZ-3136

    • Capture LogMiner session parameters when session fails to start DBZ-3153

    • Process special values in temporal datatypes DBZ-2614

    Fixes

    • Negative timestamps are converted to positive during snapshot DBZ-2616

    • Wrong reference to KafkaConnector in setting up Debezium DBZ-2745

    • Oracle Connector(Using Logminer) with Oracle RDS (v12) does not capture changes DBZ-2754

    • Oracle connector causes ORA-65090 when connecting to an Oracle instance running in non-CDB mode DBZ-2795

    • Warnings and notifications from PostgreSQL are ignored by the connector until the connection is closed DBZ-2865

    • Add support for MySQL to UI Backend DBZ-2950

    • ExtractNewRecord SMT incorrectly extracts ts_ms from source info DBZ-2984

    • Replication terminates with ORA-01291: missing log file DBZ-3001

    • Kafka Docker image the HEAP_OPTS variable is not used DBZ-3006

    • Support multiple schemas with Oracle LogMiner DBZ-3009

    • Function calls does not allow parentheses for functions with non-mandatory parentheses DBZ-3017

    • Complete support for properties that contain hyphens DBZ-3019

    • UI issues with connectors table row expansion state DBZ-3049

    • SQLException for Global temp tables from OracleDatabaseMetaData.getIndexInfo() makes Debezium snapshotting fail DBZ-3057

    • Cassandra Connector doesn’t support Cassandra version >=3.11.5 DBZ-3060

    • Make Cassandra Connector work with CommitLogTransfer better DBZ-3063

    • no viable alternative at input 'create or replace index' DBZ-3067

    • Connect image propagates env vars starting with CONNECT prefix DBZ-3070

    • PgOutputMessageDecoder doesn’t order primary keys DBZ-3074

    • Strange transaction metadata for Oracle logminer connector DBZ-3090

    • Getting RejectedExecutionException when checking topic settings from KafkaDatabaseHistory.checkTopicSettings DBZ-3096

    • Environment Variables with spaces are truncated when written to properties file DBZ-3103

    • Error: Supplemental logging not configured for table. Use command: ALTER TABLE DBZ-3109

    • Uncaught (in promise) TypeError: Cannot read property 'call' of undefined DBZ-3125

    • Final stage of snapshot analyzes tables not present in table.include.list thus stumbles upon unsupported XMLTYPE table DBZ-3151

    • Missing Prometheus port in kafka network policy DBZ-3170

    • XStream does not process NUMBER(1) data DBZ-3172

    Other changes

    • Setup CI job for DB2 DBZ-2235

    • Integration with Service Registry promoted to GA DBZ-2815

    • Remove DECIMAL string sanitisation once Vitess upstream bug is fixed DBZ-2908

    • Review format and configuration options for Db2 for GA DBZ-2977

    • Test with Postgres 13 DBZ-3022

    • Prepare Debezium UI to participate in upstream releases DBZ-3027

    • Upgrade testcontainers to 1.15.1 DBZ-3066

    • Use new deployment endpoint for releases to Maven Central DBZ-3069

    • Remove obsolete Awestruct container image DBZ-3072

    • "JDBC driver" doesn’t make sense for non-relational connectors DBZ-3076

    • Replace RecordMakers with MySqlChangeRecordEmitter DBZ-3077

    • Make CI builds resilient against disconnects on GH Actions infrastructure DBZ-3083

    • Separate SourceInfo and MySQL offset context DBZ-3086

    • Remove zero-width whitespace from option names DBZ-3087

    • Adapt UI for MySQL connector type DBZ-3091

    • Change MySQL database schema contract to support separate parsing and processing phase DBZ-3093

    • MySQL build stuck for 6h DBZ-3095

    • Rewrite legacy reader tests DBZ-3099

    • Intermittent test failure in Postgres PostgresConnectorIT#customSnapshotterSkipsTablesOnRestart DBZ-3107

    • Remove duplicate anchor links in Connector properties DBZ-3111

    • Upgrade to Quarkus 1.12.0.Final DBZ-3116

    • Config validation for Vitess DBZ-3117

    • Config validation for Oracle DBZ-3119

    • Avoid naming conflict between connection classes DBZ-3147

    • Set up commit message check for Vitess DBZ-3152

    • Put IIDR license requirement into NOTE box DBZ-3163

    • Consistent logging of connection validation failure DBZ-3164

    • Remove COLUMN_BLACK_LIST option in Oracle connector DBZ-3167

    Release 1.5.0.Alpha1 (February 4th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    A new capturing implementation for the Debezium MySQL connector has been created (DBZ-1865) based on the common connector framework used by all the other Kafka Connect connectors of Debezium. The connector behaviour is almost at parity with the previous implementation, with the exception of the experimental parallel snapshotting feature (DBZ-175), which isn’t available with the new implementation yet and is planned to be re-introduced later in a different form.

    In addition, records in the schema change topic now have a non-optional field tableChanges, which describes the represented schema change in a structured way. If you are using a schema registry, use a compatibility setting of FORWARD for the schema change topic, so that the new schema version used for schema change events can be registered successfully.
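
    For illustration, a hedged sketch of setting that compatibility level through the Confluent Schema Registry REST API; the registry URL, topic name, and the <topic>-value subject naming (TopicNameStrategy) are assumptions made for this example.

        # Sketch: set FORWARD compatibility on the schema change topic's value subject.
        import requests

        REGISTRY_URL = "http://localhost:8081"   # hypothetical registry endpoint
        SCHEMA_CHANGE_TOPIC = "dbserver1"        # hypothetical schema change topic
        subject = f"{SCHEMA_CHANGE_TOPIC}-value"

        resp = requests.put(f"{REGISTRY_URL}/config/{subject}",
                            json={"compatibility": "FORWARD"})
        resp.raise_for_status()
        print(resp.json())   # e.g. {"compatibility": "FORWARD"}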

    If you encounter any issues with the new MySQL connector implementation, please log a Jira issue; in this case, you can use the legacy implementation by setting the internal.implementation=legacy connector configuration option.

    New Features

    • Support emitting TRUNCATE events in PostgreSQL pgoutput plugin DBZ-2382

    • Migrate DebeziumContainer enhancements for DBZ-2950 and DBZ-2952 into main repository DBZ-3024

    • Implement meta tags DBZ-2620

    • Improve performance for very large postgres schemas DBZ-2575

    Fixes

    • Extra connectors are not buildable unless main Debezium is built locally DBZ-2901

    • java.sql.SQLException: ORA-01333: failed to establish Logminer Dictionary DBZ-2939

    • Add support for connector/task lifecycle ops to UI backend DBZ-2951

    • Cassandra CDC failed to deserialize list<UserType> column correct DBZ-2974

    • Debezium Oracle Connector will appear stuck on large SCN jumps DBZ-2982

    • Invalid regex patterns should fail validation when validation database.include/exclude.list properties for MySQL connector DBZ-3008

    • Fix repository config for Jenkins snapshot deployment DBZ-3011

    • Unable to parse non-constant SIGNAL option value DBZ-3018

    • Cannot parse expression in DEFAULT column definition DBZ-3020

    • Key being used as value in pubsub batch handler DBZ-3037

    • Table creation DDL with CHARACTER SET = DEFAULT causes MySQL connector failure DBZ-3023

    • Missing some MariaDB existence predicates in ALTER TABLE DBZ-3039

    Other changes

    • Improved resiliency of release process against OSS failures DBZ-2274

    • Pull up HOSTNAME, PORT, DATABASE_NAME, USER and PASSWORD to RelationalDatabaseConnectorConfig DBZ-2420

    • Db2 Connector doesn’t declare database related config options DBZ-2424

    • Fix build status badge in README files DBZ-2802

    • Merge and complete web components PR DBZ-2804

    • IBM Db2 Connector promoted to GA DBZ-2814

    • Document several Oracle frequently encountered problems DBZ-2970

    • No syntax highlighting on website listings DBZ-2978

    • Admonition icons missing DBZ-2986

    • Improve logging for Logminer adapter DBZ-2999

    • CI build not required for changes in README files DBZ-3012

    • Execute ZZZGtidSetIT as the last test DBZ-3047

    • Capture and report LogMiner state when mining session fails to start DBZ-3055

    \ No newline at end of file
    + Release Notes for Debezium 1.5

    Release Notes for Debezium 1.5

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.5.4.Final (July 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Schema change events of excluded databases are discarded DBZ-3622

    • Oracle DDL parser fails on CREATE TABLE: mismatched input 'maxtrans' expecting {'AS', ';'} DBZ-3641

    Other changes

    • Database history properties missing in connector docs DBZ-3459

    • Update deprecated config for debezium smt DBZ-3673

    • Prepare test-suite for Kafka on RHEL DBZ-3566

    Release 1.5.3.Final (June 17th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Oracle connector does not correctly handle partially committed transactions DBZ-3322

    • Broken links in User guide table of routing SMT configuration options DBZ-3410

    • Broken link to basic configuration example in downstream content-based routing topic DBZ-3412

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Timezone difference not considered in LagFromSourceInMilliseconds calculation DBZ-3456

    • MySQL metrics documentation refers to legacy implementation DBZ-3572

    • Update downstream MySQL doc to reference streaming metrics vs. binlog metrics DBZ-3582

    • Transaction commit event dispatch fails if no active transaction in progress. DBZ-3593

    • GRANT/REVOKE for roles is not working DBZ-3610

    • DDL ParsingException - "SUPPLEMENTAL LOG DATA (UNIQUE INDEX) COLUMNS" DBZ-3619

    Other changes

    • Modularize doc for MongoDB component DBZ-2334

    • Docs clarification around tombstone events DBZ-3416

    • Update external link to AMQ Streams documentation DBZ-3502

    • Formatting updates to correct errors in documentation builds DBZ-3518

    Release 1.5.2.Final (May 28th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Release 1.5.1.Final requires JDK >= 11 DBZ-3574

    Other changes

    • Update external links in downstream docs to AMQ Streams deployment information DBZ-3525

    Release 1.5.1.Final (May 27th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Retry logic for "No more data to read from socket" is too strict DBZ-3472

    Fixes

    • io.debezium.text.ParsingException: no viable alternative at input 'IDNUMBER(4)GENERATEDBY' DBZ-1721

    • oracle logminer cannot add duplicate logfile DBZ-3266

    • First online log query does not limit results to those that are available. DBZ-3332

    • Connector crashing after running for some time DBZ-3377

    • An exception in resolveOracleDatabaseVersion if system language is not English DBZ-3397

    • Rename table stores only a fragment of DDL in schema history DBZ-3399

    • Broken link in downstream Monitoring chapter 7.3 DBZ-3409

    • Broken link in content-based routing chapter to page for downloading the SMT scripting archive DBZ-3411

    • LogMinerDmlParser mishandles double single quotes in WHERE clauses DBZ-3413

    • Incorrectly formatted links in downstream automatic topic creation doc DBZ-3414

    • SMT acronym incorrectly expanded in Debezium User Guide DBZ-3415

    • Debezium mapped diagnostic contexts doesn’t work DBZ-3438

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Debezium MySQL connector does not process tables with partitions DBZ-3468

    • "Found null value for non-optional schema" error when issuing TRUNCATE from Postgres on a table with a PK DBZ-3469

    • Connector crashes when table name contains '-' character DBZ-3485

    • MySQL8 GRANT statement not parsable DBZ-3499

    • ReadToInsertEvent SMT needs to set ConfigDef DBZ-3508

    • SQLServer low throughput tables increase usage of TempDB DBZ-3515

    • Oracle redo log switch not detected when using multiple archiver process threads DBZ-3516

    • Missing schema function in DDL Parser DBZ-3543

    • DDL ParsingException "mismatched input 'sharing'" for create table syntax. DBZ-3549

    Other changes

    • User Guide corrections for SQL Server connector DBZ-3297

    • User Guide corrections for Db2 connector DBZ-3298

    • User Guide corrections for MySQL connector DBZ-3299

    • User Guide corrections for MongoDB connector DBZ-3300

    • Scope RHEL support for Debezium DBZ-3354

    • Reword prereq in downstream SQL Server connector doc DBZ-3392

    • Duplicate entry in MySQL connector properties table for mysql-property-skipped-operations DBZ-3402

    • Upgrade binlog client DBZ-3463

    • Backport documentation fixes to 1.5 DBZ-3532

    Release 1.5.0.Final (April 7th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Add support for Redis Streams target in Debezium Server DBZ-2879

    • Provide LSN coordinates as standardized sequence field DBZ-2911

    Fixes

    • Do not mine Data Guard archive log entries DBZ-3341

    • Debezium stuck in an infinite loop on boot DBZ-3343

    • Schema change SourceRecords have null partition DBZ-3347

    • LogMiner can incorrectly resolve that SCN is available DBZ-3348

    • The event.deserialization.failure.handling.mode is documented incorrectly DBZ-3353

    • DB2 Function wrong DBZ-3362

    • LogMiner parser incorrectly parses UNISTR function DBZ-3367

    • Invalid Decimal schema: scale parameter not found DBZ-3371

    Other changes

    • Allow Debezium Server to be used with Apicurio converters DBZ-2388

    • Remove connector properties from descriptors on the /connector-types response DBZ-3316

    • Literal attribute rendered in deployment instructions for the downstream PostgreSQL connector DBZ-3338

    • Fix test failures due to existing database object artifacts DBZ-3344

    • Use correct repository level PAT for building debezium website DBZ-3345

    • Document configuration of max.request.size DBZ-3355

    • Use Java 8 for Cassandra workflow DBZ-3357

    • Trigger workflow on workflow definition update DBZ-3358

    • Prefer DDL before logical schema in history recovery DBZ-3361

    • Add missing space and omitted command to PostgreSQL connector doc DBZ-3372

    • Wrong badge on Docker Hub DBZ-3383

    Release 1.5.0.CR1 (March 24th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The Oracle connector was promoted from incubation to stable state (DBZ-3290). As a result, the following changes were included to prevent future breaking changes:

    • configuration option database.oracle.version has been removed

    • the LogMiner-specific metrics have been incorporated into the streaming metrics

    • scn and commit_scn fields in the source info block are no longer LONG but STRING to enable very large SCN values (DBZ-2994)

    New Features

    • Upgrade to Apache Kafka 2.7.0 DBZ-2872

    • Add more parameters to TLS support DBZ-3262

    Fixes

    • Debezium logs "is not a valid Avro schema name" can be too verbose DBZ-2511

    • message.key.columns Regex Validation Time Complexity DBZ-2957

    • OID values don’t fit to INT32 schema DBZ-3033

    • Connector automatically restart on ORA-26653 DBZ-3236

    • UI container has no assets (JS artifacts, fonts, etc) and randomly fails building DBZ-3247

    • Revert Clob behavior for Oracle LogMiner to avoid null values DBZ-3257

    • SQL Server misses description for decimal.handling.mode DBZ-3267

    • Oracle connector ignores time.precision.mode and just uses adaptive mode DBZ-3268

    • commons-logging JAR is missing from Debezium Server distro DBZ-3277

    • MongoDB timeouts crash the whole connector DBZ-3278

    • Prefer archive logs over redo logs of the same SCN range DBZ-3292

    • LogMiner mining query may unintentionally skip records DBZ-3295

    • IndexOutOfBoundsException when LogMiner DML update statement contains a function as last column’s value DBZ-3305

    • Out of memory with mysql snapshots (regression of DBZ-94) DBZ-3309

    • Keyword ORDER is a valid identifier in MySQL grammar DBZ-3310

    • DDL statement couldn’t be parsed for ROW_FORMAT=TOKUDB_QUICKLZ DBZ-3311

    • LogMiner can miss a log switch event if too many switches occur. DBZ-3319

    • Function MOD is missing from MySQL grammar DBZ-3333

    • Incorrect SR label names in OCP testsuite DBZ-3336

    • DB2 upstream tests are still using master as the default branch DBZ-3337

    Other changes

    • Demo: Exploring non-key joins of Kafka Streams 2.4 DBZ-2100

    • Publish Debezium BOM POM DBZ-2145

    • Use BigInteger as SCN rather than BigDecimal DBZ-2457

    • Document ChangeConsumer usage for Debezium Engine DBZ-2520

    • Add check that target release is set DBZ-2536

    • Consolidate multiple JMX beans during Oracle streaming with LogMiner DBZ-2537

    • Create script for listing all contributors of a release DBZ-2592

    • Explicitly mention Debezium Engine database history config for different connectors DBZ-2665

    • Cleanup by restructuring Debezium UI REST API structure DBZ-3031

    • Make Debezium main repo build checks artifacts for CI/CD checks in sibling repositories available on Maven Central DBZ-3142

    • Handle duplicate warnings for deprecated options DBZ-3218

    • Upgrade Jackson as per AK 2.7 DBZ-3221

    • Document the need of qualified names in snapshot.include.collection.list DBZ-3244

    • Add snapshot.select.statement.override options to Oracle documentation DBZ-3250

    • Remove all possible backend calls from non-validation mode DBZ-3255

    • Document delayed TX END markers DBZ-3261

    • Extended scripting SMT docs with handling of non-data events DBZ-3269

    • Unify column inclusion/exclusion handling DBZ-3271

    • Downstream conditional spans topic boundary in db2 doc DBZ-3272

    • Add info about language dependencies into scripting SMTs DBZ-3280

    • Copyright check script should take additional connector repos into consideration DBZ-3281

    • Intermittent failure of MyMetricsIT.testStreamingOnlyMetrics DBZ-3304

    • Remove references to supported configurations from Db2 connector documentation DBZ-3308

    • Use separate API calls to get the connector info(name, id etc) and details(Properties) DBZ-3314

    • Documentation updates should trigger a website build DBZ-3320

    • Cassandra connector is not part of core CI build DBZ-3335

    Release 1.5.0.Beta2 (March 12th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    The Oracle connector emits NUMBER(1) columns as int8 now by default. To emit them as boolean instead, use the io.debezium.connector.oracle.converters.NumberOneToBooleanConverter as described in the connector documentation (DBZ-3208).

    The Debezium connector for Oracle now uses the LogMiner-based capturing implementation by default. In order to use the XStream-based implementation, set the connector option database.connection.adapter to xstream (DBZ-3241).

    New Features

    • Detect and skip non-parent index-organized tables DBZ-3036

    • Capture additional JMX metrics for LogMiner DBZ-3038

    • Incorrect information in Debezium connector for Postgres documentation DBZ-3197

    • Add support for SET column type DBZ-3199

    • Improve relocation logic for processed commitLog files DBZ-3224

    • Disable log.mining.transaction.retention.hours logic by default DBZ-3242

    • Provide a signalling table DBZ-3141

    • Update sensitive env vars for connect-base image DBZ-3223

    • Support specifying kinesis endpoint in debezium server DBZ-3246

    • Add log4j.properties file DBZ-3248

    Fixes

    • Error in LSN DBZ-2417

    • Connector restarts with an SCN that was previously processed. DBZ-2875

    • Misleading error message for filtered publication with misconfigured filters DBZ-2885

    • There are still important problems with Oracle LogMiner DBZ-2976

    • Don’t execute initial statements upon connector validation DBZ-3030

    • Forever stuck with new binlog parser (1.3 and later) when processing big JSON column data DBZ-3106

    • Change Events are not captured after initial load DBZ-3128

    • Repeating Unknown schema error even after recent schema_recovery DBZ-3146

    • CloudEvent value id field is not unique DBZ-3157

    • Oracle connector fails when using database.tablename.case.insensitive=true DBZ-3190

    • DML parser IndexOutOfRangeException with where-clause using "IS NULL" DBZ-3193

    • ORA-01284 file cannot be opened error when file locked by another process DBZ-3194

    • CommitThroughput metrics can raise division by zero error DBZ-3200

    • LogMiner does not process NUMBER(1) data DBZ-3208

    • Update MongoDB driver version DBZ-3212

    • Extra connectors are not buildable unless main Debezium is built locally DBZ-3213

    • Docker image debezium/server:1.5 won’t start DBZ-3217

    • Debezium Oracle Connector not excluding table columns DBZ-3219

    • LogMiner parse failure with Update DML with no where condition DBZ-3235

    • Debezium 1.4.2.Final and onwards unable to parse sasl.jaas.config from env var DBZ-3245

    • Debezium engine should call stop on task even when start fails DBZ-3251

    • No meaningful message provided when oracle driver is missing DBZ-3254

    Other changes

    • Discuss capture job configuration as a tuning option for SQL Server and Db2 DBZ-2122

    • Prepare customizing auto-created topics doc for downstream DBZ-2654

    • Wrong warning about deprecated options DBZ-3084

    • Have non-validating mode in the UI DBZ-3088

    • Move container image builds to GH Actions DBZ-3131

    • Exclude CommonConnectorConfig.PROVIDE_TRANSACTION_METADATA from connectors not supporting it DBZ-3132

    • Add example for Debezium UI to debezium-examples repo DBZ-3134

    • Clarify required privileges for using pgoutput DBZ-3138

    • Do not rely on Max SCN seed value w/LogMiner DBZ-3145

    • Postgres documentation improvements DBZ-3149

    • Support running Oracle test suite in non-CDB (no PDB name) mode DBZ-3154

    • Update Oracle documentation DBZ-3156

    • Move the Oracle connector to the main repository DBZ-3166

    • Minor editorial update to PostgreSQL connector documentation DBZ-3192

    • Incorrect link/anchor pair for truncate.handling.mode property in PG properties documentation DBZ-3195

    • Update oracle-vagrant-box DBZ-3206

    • Update Oracle versions tested DBZ-3215

    • Oracle test suite does not always clean-up tables after tests DBZ-3237

    • Update Oracle tutorial example DBZ-3239

    • Use LogMiner adapter by default for Oracle connector DBZ-3241

    • Avoid reference to upstream Docker set-up DBZ-3259

    Release 1.5.0.Beta1 (February 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    A regression in the binlog client used by Debezium was identified where large JSON documents in a MySQL JSON column cause a severe performance degradation (DBZ-3106). This issue is under active discussion with the maintainer of the binlog client library.

    In earlier versions of Debezium, the MySQL connector incorrectly emitted snapshot events using the c (create) operation type instead of the correct type r (read). If you have consumers that rely on that earlier behavior, you can use the io.debezium.connector.mysql.transforms.ReadToInsertEvent single message transform to emulate it (DBZ-2788). The connector option snapshot.events.as.inserts, which was accidentally introduced in 1.4.0 for the same purpose, has been removed; use the SMT instead. The SMT is meant for migration purposes only and will be removed in a future Debezium version.

    The (incubating) Debezium connector for Oracle emits transaction ids in lower-case now, differing from the previous behavior of returning them as upper-case (DBZ-3165).

    The previously deprecated snapshot mode INITIAL_SCHEMA_ONLY of the Oracle connector has been removed. Please use SCHEMA_ONLY instead (DBZ-3034).

    New Features

    • Make field descriptions consistent for time values (milliseconds, ms, sec, seconds, etc) DBZ-2858

    • DebeziumEngine RecordChangeEvents cannot be modified DBZ-2897

    • Add license headers and related checkstyle checks for Debezium UI files DBZ-2985

    • Display commit SHA of UI frontend/backend somewhere in the footer DBZ-3052

    • Implement UX suggestions for display of connector type DBZ-3054

    • SqlServerConnector does not implement validate DBZ-3056

    • Database History Producer does not close with a timeout DBZ-3075

    • Improve DML parser performance DBZ-3078

    • Connector list table UI improvement desktop/mobile DBZ-3079

    • Vitess Connector adds support for Vitess 9.0.0 GA DBZ-3100

    • Improve layout for Column Truncate - Mask Component DBZ-3101

    • Improve layout for Data options component and main wizard nav DBZ-3105

    • Add ability to skip tests based on available database options DBZ-3110

    • Support for Transaction Metadata in MySql connector DBZ-3114

    • Add support for JSON column type DBZ-3115

    • Add support for ENUM column type DBZ-3124

    • Enable easy downloading of Camel Kafka Connectors DBZ-3136

    • Capture LogMiner session parameters when session fails to start DBZ-3153

    • Process special values in temporal datatypes DBZ-2614

    Fixes

    • Negative timestamps are converted to positive during snapshot DBZ-2616

    • Wrong reference to KafkaConnector in setting up Debezium DBZ-2745

    • Oracle Connector(Using Logminer) with Oracle RDS (v12) does not capture changes DBZ-2754

    • Oracle connector causes ORA-65090 when connecting to an Oracle instance running in non-CDB mode DBZ-2795

    • Warnings and notifications from PostgreSQL are ignored by the connector until the connection is closed DBZ-2865

    • Add support for MySQL to UI Backend DBZ-2950

    • ExtractNewRecord SMT incorrectly extracts ts_ms from source info DBZ-2984

    • Replication terminates with ORA-01291: missing log file DBZ-3001

    • Kafka Docker image the HEAP_OPTS variable is not used DBZ-3006

    • Support multiple schemas with Oracle LogMiner DBZ-3009

    • Function calls does not allow parentheses for functions with non-mandatory parentheses DBZ-3017

    • Complete support for properties that contain hyphens DBZ-3019

    • UI issues with connectors table row expansion state DBZ-3049

    • SQLException for Global temp tables from OracleDatabaseMetaData.getIndexInfo() makes Debezium snapshotting fail DBZ-3057

    • Cassandra Connector doesn’t support Cassandra version >=3.11.5 DBZ-3060

    • Make Cassandra Connector work with CommitLogTransfer better DBZ-3063

    • no viable alternative at input 'create or replace index' DBZ-3067

    • Connect image propagates env vars starting with CONNECT prefix DBZ-3070

    • PgOutputMessageDecoder doesn’t order primary keys DBZ-3074

    • Strange transaction metadata for Oracle logminer connector DBZ-3090

    • Getting RejectedExecutionException when checking topic settings from KafkaDatabaseHistory.checkTopicSettings DBZ-3096

    • Environment Variables with spaces are truncated when written to properties file DBZ-3103

    • Error: Supplemental logging not configured for table. Use command: ALTER TABLE DBZ-3109

    • Uncaught (in promise) TypeError: Cannot read property 'call' of undefined DBZ-3125

    • Final stage of snapshot analyzes tables not present in table.include.list thus stumbles upon unsupported XMLTYPE table DBZ-3151

    • Missing Prometheus port in kafka network policy DBZ-3170

    • XStream does not process NUMBER(1) data DBZ-3172

    Other changes

    • Setup CI job for DB2 DBZ-2235

    • Integration with Service Registry promoted to GA DBZ-2815

    • Remove DECIMAL string sanitisation once Vitess upstream bug is fixed DBZ-2908

    • Review format and configuration options for Db2 for GA DBZ-2977

    • Test with Postgres 13 DBZ-3022

    • Prepare Debezium UI to participate in upstream releases DBZ-3027

    • Upgrade testcontainers to 1.15.1 DBZ-3066

    • Use new deployment endpoint for releases to Maven Central DBZ-3069

    • Remove obsolete Awestruct container image DBZ-3072

    • "JDBC driver" doesn’t make sense for non-relational connectors DBZ-3076

    • Replace RecordMakers with MySqlChangeRecordEmitter DBZ-3077

    • Make CI builds resilient against disconnects on GH Actions infrastructure DBZ-3083

    • Separate SourceInfo and MySQL offset context DBZ-3086

    • Remove zero-width whitespace from option names DBZ-3087

    • Adapt UI for MySQL connector type DBZ-3091

    • Change MySQL database schema contract to support separate parsing and processing phase DBZ-3093

    • MySQL build stuck for 6h DBZ-3095

    • Rewrite legacy reader tests DBZ-3099

    • Intermittent test failure in Postgres PostgresConnectorIT#customSnapshotterSkipsTablesOnRestart DBZ-3107

    • Remove duplicate anchor links in Connector properties DBZ-3111

    • Upgrade to Quarkus 1.12.0.Final DBZ-3116

    • Config validation for Vitess DBZ-3117

    • Config validation for Oracle DBZ-3119

    • Avoid naming conflict between connection classes DBZ-3147

    • Set up commit message check for Vitess DBZ-3152

    • Put IIDR license requirement into NOTE box DBZ-3163

    • Consistent logging of connection validation failure DBZ-3164

    • Remove COLUMN_BLACK_LIST option in Oracle connector DBZ-3167

    Release 1.5.0.Alpha1 (February 4th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.6.1 and has been tested with version 2.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.5.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.5.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.5.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    A new capturing implementation for the Debezium MySQL connector has been created (DBZ-1865) based on the common connector framework used by all the other Kafka Connect connectors of Debezium. The connector behaviour is almost at parity with the previous implementation, with the exception of the experimental parallel snapshotting feature (DBZ-175), which isn’t available with the new implementation yet and is planned to be re-introduced later in a different form.

    In addition, records in the schema change topic now have a non-optional field tableChanges, which describes the represented schema change in a structured way. If you are using a schema registry, use a compatibility setting of FORWARD for the schema change topic, so that the new schema version used for schema change events can be registered successfully.

    If you encounter any issues with the new MySQL connector implementation, please log a Jira issue; in this case, you can use the legacy implementation by setting the internal.implementation=legacy connector configuration option.

    New Features

    • Support emitting TRUNCATE events in PostgreSQL pgoutput plugin DBZ-2382

    • Migrate DebeziumContainer enhancements for DBZ-2950 and DBZ-2952 into main repository DBZ-3024

    • Implement meta tags DBZ-2620

    • Improve performance for very large postgres schemas DBZ-2575

    Fixes

    • Extra connectors are not buildable unless main Debezium is built locally DBZ-2901

    • java.sql.SQLException: ORA-01333: failed to establish Logminer Dictionary DBZ-2939

    • Add support for connector/task lifecycle ops to UI backend DBZ-2951

    • Cassandra CDC failed to deserialize list<UserType> column correct DBZ-2974

    • Debezium Oracle Connector will appear stuck on large SCN jumps DBZ-2982

    • Invalid regex patterns should fail validation when validation database.include/exclude.list properties for MySQL connector DBZ-3008

    • Fix repository config for Jenkins snapshot deployment DBZ-3011

    • Unable to parse non-constant SIGNAL option value DBZ-3018

    • Cannot parse expression in DEFAULT column definition DBZ-3020

    • Key being used as value in pubsub batch handler DBZ-3037

    • Table creation DDL with CHARACTER SET = DEFAULT causes MySQL connector failure DBZ-3023

    • Missing some MariaDB existence predicates in ALTER TABLE DBZ-3039

    Other changes

    • Improved resiliency of release process against OSS failures DBZ-2274

    • Pull up HOSTNAME, PORT, DATABASE_NAME, USER and PASSWORD to RelationalDatabaseConnectorConfig DBZ-2420

    • Db2 Connector doesn’t declare database related config options DBZ-2424

    • Fix build status badge in README files DBZ-2802

    • Merge and complete web components PR DBZ-2804

    • IBM Db2 Connector promoted to GA DBZ-2814

    • Document several Oracle frequently encountered problems DBZ-2970

    • No syntax highlighting on website listings DBZ-2978

    • Admonition icons missing DBZ-2986

    • Improve logging for Logminer adapter DBZ-2999

    • CI build not required for changes in README files DBZ-3012

    • Execute ZZZGtidSetIT as the last test DBZ-3047

    • Capture and report LogMiner state when mining session fails to start DBZ-3055

    \ No newline at end of file
    diff --git a/releases/1.6/index.html b/releases/1.6/index.html
    index 47896520bb..1e03917005 100644
    --- a/releases/1.6/index.html
    +++ b/releases/1.6/index.html
    @@ -1 +1 @@
    - Debezium Release Series 1.6

    stable

    Tested Versions

    Java 11+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.21
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 4.2.1
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.14
    Oracle Database: 12c, 19c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.11.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 8.0.x*, 9.0.x
    Driver: 9.0.0
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    1.6.0.Beta1

    2021-05-20
    LOB datatypes and DDL support for Oracle; Incremental snapshotting supported for Db2 and SQL Server Connectors; All PostgreSQL snapshotting modes are using the exported approach
    \ No newline at end of file
    + Debezium Release Series 1.6

    stable

    Tested Versions

    Java 11+
    Kafka Connect 1.x, 2.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.21
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2
    Driver: 4.2.1
    PostgreSQL Database: 9.6, 10, 11, 12
    Driver: 42.2.14
    Oracle Database: 12c, 19c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.11.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 8.0.x*, 9.0.x
    Driver: 9.0.0
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    1.6.0.Beta1

    2021-05-20
    LOB datatypes and DDL support for Oracle; Incremental snapshotting supported for Db2 and SQL Server Connectors; All PostgreSQL snapshotting modes use the exported approach
\ No newline at end of file
diff --git a/releases/1.6/release-notes.html b/releases/1.6/release-notes.html
index eaeab63683..977ee3953e 100644
--- a/releases/1.6/release-notes.html
+++ b/releases/1.6/release-notes.html
@@ -1 +1 @@
- Release Notes for Debezium 1.6

    Release Notes for Debezium 1.6

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.6.4.Final (December 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.
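
    The stop/re-create part of this procedure can be driven through the Kafka Connect REST API. The following is a rough sketch only, not part of the release notes: the worker URL and connector name are placeholder assumptions, and swapping the plugin files and restarting the Connect worker still happens outside the script.

# Rough sketch, not an official Debezium procedure: stop a connector, let the
# plugin files be swapped, then re-create it with its saved configuration via
# the Kafka Connect REST API. CONNECT_URL and CONNECTOR_NAME are placeholders.
import json
import urllib.request

CONNECT_URL = "http://localhost:8083"    # assumed Connect worker address
CONNECTOR_NAME = "inventory-connector"   # assumed connector name

def call(method, path, body=None):
    data = json.dumps(body).encode("utf-8") if body is not None else None
    request = urllib.request.Request(
        CONNECT_URL + path,
        data=data,
        method=method,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request) as response:
        payload = response.read()
        return json.loads(payload) if payload else None

# 1. Save the current configuration so the connector can be re-created unchanged.
config = call("GET", "/connectors/%s/config" % CONNECTOR_NAME)

# 2. Gracefully stop the connector by deleting it; its offsets remain in Kafka.
call("DELETE", "/connectors/%s" % CONNECTOR_NAME)

# 3. Outside this script: replace the old plugin files with the new ones and
#    restart the Connect worker.
input("Install the new plugin files, restart Connect, then press Enter...")

# 4. Re-create the connector with the same name and configuration; it resumes
#    from the previously committed offsets.
call("PUT", "/connectors/%s/config" % CONNECTOR_NAME, body=config)

    Because connector offsets are stored in Kafka and looked up by connector name, re-creating the connector under the same name with the same configuration lets the new version continue where the previous one left off.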

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • DML statement couldn’t be parsed DBZ-4194

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • DDL statement couldn’t be parsed DBZ-4224

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    Other changes

    • Document incremental chunk size setting DBZ-4127

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    Release 1.6.3.Final (October 21st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Oracle - Provide a more user-friendly way to update SCN DBZ-3876

    Fixes

    • Status stays in RUNNING for Postgres Connector after Postgres is stopped DBZ-3655

    • Oracle connector unable to start in archive only mode DBZ-3712

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • DML statement couldn’t be parsed DBZ-3892

    • Oracle Connector replicating data from all PDBs. Missing PDB filter during replication. DBZ-3954

    • Oracle connector Parsing Exception: DDL statement couldn’t be parsed DBZ-3962

    • Oracle-Connector fails parsing a DDL statement DBZ-3977

    • Oracle connector fails after error ORA-01327 DBZ-4010

    • Incorrect incremental snapshot DDL triggers snapshot that generates unending* inserts against signalling table DBZ-4013

    • Oracle-Connector fails parsing a DDL statement (truncate partition) DBZ-4017

    • DDL statement couldn’t be parsed DBZ-4026

    • Question about handling Raw column types DBZ-4037

    • Incremental snapshotting of a table can be prematurely terminated after restart DBZ-4057

    • Events are missed with Oracle connector due to LGWR buffer not being flushed to redo logs DBZ-4067

    • Database history is constantly being reconfigured DBZ-4106

    • Debezium deals with Oracle DDL appeared IndexOutOfBoundsException: Index: 0, Size: 0 DBZ-4135

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    Other changes

    • Hyper-link references between options in the Outbox SMT options table DBZ-3920

    • Incorrect documentation for message.key.columns DBZ-3437

    • Promote Outbox SMT to GA DBZ-3584

    • Upgrade to binlog-client 0.25.3 DBZ-3787

    • Document awareness of Oracle database tuning DBZ-3880

    • Update antora.yml file with new values for SMT attributes DBZ-3922

    • Add top-level Transformation menu node for downstream docs DBZ-3931

    • Update docs to specify that connectors track metadata only for transactions that occur after deployment DBZ-3961

    • Remove GRANT ALTER ANY TABLE from Oracle documentation DBZ-4007

    • Misc. MongoDB connector docs fixes DBZ-4149

    Release 1.6.2.Final (September 2nd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Broken links in Avro and Outbox Event Router documentation DBZ-3430

    • Oracle LogMiner DdlParser Error DBZ-3723

    • DDL statement couldn’t be parsed DBZ-3755

    • Debezium Oracle connector stops with DDL parsing error DBZ-3759

    • Debezium snapshot.select.statement.overrides overrides not used DBZ-3760

    • Server name pattern is unnecessarily restrictive. DBZ-3765

    • Exception thrown from getTableColumnsFromDatabase DBZ-3769

    • Crash when processing MySQL 5.7.28 TIME fields returns empty blob instead of null DBZ-3773

    • Add DEFAULT to partition option engine DBZ-3784

    • Initiating MongoDB connector causes oplog table scan DBZ-3788

    • SRCFG00014: The config property debezium.sink.pravega.scope is required but it could not be found in any config source DBZ-3792

    • Debezium 1.6.1 expecting database.port even when database.url is provided in config. DBZ-3813

    • Postgres numeric default value throwing exception DBZ-3816

    • Snapshot locking mode "minimal_percona" incorrectly resets transaction & isolation state DBZ-3838

    • Typo with prodname asciidoc attribute usage DBZ-3856

    • Oracle unparsable DDL issue DBZ-3877

    • Support AS clause in GRANT statement DBZ-3878

    • Error Parsing Oracle DDL dropping PK DBZ-3886

    • EMPTY_CLOB() and EMPTY_BLOB() should be treated as empty LOB values DBZ-3893

    • Oracle DDL parsing issue DBZ-3896

    • DEBEZIUM producer stops unexpectedly trying to change column in table which does not exist DBZ-3898

    • "binary.handling.mode": "hex" setting works incorrectly for values with trailing zeros DBZ-3912

    • Incorrect validation of truncate handling mode DBZ-3935

    Other changes

    • Discuss SMT predicates in docs DBZ-3227

    • Adjust to changed Strimzi CRDs DBZ-3385

    • Clarify lifecycle of snapshot metrics DBZ-3613

    • Error in description of the property column.mask.hash.hashAlgorithm.with.salt.salt DBZ-3802

    • Improperly constructed links generating downstream build errors DBZ-3858

    • Extract new top-level menu node for SMTs DBZ-3873

    • Upgrade to Jackson Databind version 2.10.5.1 DBZ-3927

    • Upgrade ZooKeeper in 1.6 container image DBZ-3950

    • Upgrade Kafka container image to 2.7.1 DBZ-3956

    • Performance issue due to inefficient ObjectMapper initialization DBZ-3770

    Release 1.6.1.Final (July 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Support invisible columns with MySql 8.0.23+ DBZ-3623

    • Db2Connector is unable to establish validation connection DBZ-3632

    • MySQL Connector error after execute a "create role" statement DBZ-3686

    • Error parsing query, even with database.history.skip.unparseable.ddl DBZ-3717

    • Support for TABLE_TYPE missing from MySQL grammar DBZ-3718

    • Debezium mysql connector plugin throws SQL syntax error during incremental snapshot DBZ-3725

    Other changes

    • Add documentation about new capturing implementation for the MySQL connector to downstream product DBZ-3140

    • Doc clarification on connector rewrite DBZ-3711

    • Update Oracle connector deployment instructions for consistency DBZ-3772

    Release 1.6.0.Final (June 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Allow specifying of Oracle archive log location DBZ-3661

    Fixes

    • Fix connect container build to be compatible with Oracle Instant Client DBZ-2547

    • Schema change events of excluded databases are discarded DBZ-3622

    • Provide a descriptive error when enabling log.mining.archive.log.only.mode with an offset SCN that isn’t yet in an archive log. DBZ-3665

    • When LOB support is disabled, use legacy SCN mining algorithm DBZ-3676

    Other changes

    • Oracle connector error with tables using unique index keys: "key must not be null" DBZ-1211

    • Database history properties missing in connector docs DBZ-3459

    • Oracle connector doc fixes DBZ-3662

    • Change the reached max batch size log message to DEBUG level DBZ-3664

    • Remove unused code DBZ-3672

    • Update deprecated config for debezium smt DBZ-3673

    • Align Antlr versions used during testing DBZ-3675

    Release 1.6.0.CR1 (June 24th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Implement SKIPPED_OPERATIONS for SQLServer DBZ-2697

    • Handling database connection timeout during schema recovery DBZ-3615

    • Scope mined DDL events to include/exclude lists if provided DBZ-3634

    • Support heartbeats during periods of low change event activity DBZ-3639

    Fixes

    • Fix exception on not found table DBZ-3523

    • Transaction commit event dispatch fails if no active transaction in progress. DBZ-3593

    • Additional unique index referencing columns not exposed by CDC causes exception DBZ-3597

    • GRANT/REVOKE for roles is not working DBZ-3610

    • ParsingException for ALTER TABLE against a table that is unknown to the connector. DBZ-3612

    • Oracle connector continually logging warnings about already processed transactions. DBZ-3616

    • StringIndexOutOfBoundsException thrown while handling UTF-8 characters DBZ-3618

    • DDL ParsingException - "SUPPLEMENTAL LOG DATA (UNIQUE INDEX) COLUMNS" DBZ-3619

    • Oracle transaction reconciliation fails to lookup primary key columns if UPDATE sets columns to only NULL DBZ-3631

    • Oracle DDL parser fails on CREATE TABLE: mismatched input 'maxtrans' expecting {'AS', ';'} DBZ-3641

    • Antlr version mismatch DBZ-3646

    • SQL Agent does not start in SqlServer image when deployed to openshift DBZ-3648

    • Java UBI image is lacking gzip utility DBZ-3659

    Other changes

    • Upgrade to Apicurio Registry 2.0 DBZ-3171

    • Vitess: rename "master" branch to "main" DBZ-3275

    • Formatting updates to correct errors in documentation builds DBZ-3518

    • Prepare test-suite for Kafka on RHEL DBZ-3566

    • Upgrade to Quarkus 2.0.0.Final DBZ-3602

    • Some dependencies are broken in ocp testsuite after BOM introduction DBZ-3625

    • Handle updated json schema for connector passwords DBZ-3637

    • MySQL SourceInfo should be public DBZ-3638

    • Change CLOB/BLOB data type support to an opt-in feature DBZ-3645

    • Denote BLOB support as incubating DBZ-3651

    Release 1.6.0.Beta2 (June 10th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Clarification on MySQL vs MariaDb Usage DBZ-1145

    • Pravega sink for Debezium Server DBZ-3546

    • Postgres - Column default values are not extracted DBZ-2790

    • Add support for snapshot.include.collection.list DBZ-3062

    • Apply filters with empty filter changes 'Exclude' selection to 'Include' DBZ-3102

    • Adjust OpenShift tests to support new version of Strimzi CRDs DBZ-3475

    • Remove SchemaProcessor From Cassandra Connector DBZ-3506

    • Provide a snapshot.locking.mode option for Oracle DBZ-3557

    • Implement support for JSON function in MySQL parser DBZ-3559

    Fixes

    • AbstractConnectorTest should work in environment with longer latency DBZ-400

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-3068

    • SQL Server connector buffers all CDC events in memory if more than one table is captured DBZ-3486

    • SQLServer low throughput tables increase usage of TempDB DBZ-3515

    • Incorrectly identifies primary member of replica set DBZ-3522

    • Cannot enable binlog streaming when INITIAL_ONLY snapshot mode configured DBZ-3529

    • Connector CRD name and database.server.name cannot use the same value in OCP test-suite DBZ-3538

    • SelectLobParser checks for lowercase "is null" instead of uppercase "IS NULL" DBZ-3545

    • DDL ParsingException "mismatched input 'sharing'" for create table syntax. DBZ-3549

    • DDL ParsingException on alter table DBZ-3554

    • ORA-00310 when online redo log is archived and replaced by redo log with new sequence DBZ-3561

    • Server name pattern is unnecessarily restrictive DBZ-3562

    • ORA-01289 error encountered on Oracle RAC when multiple logs are mined with same sequence number DBZ-3563

    • MySQL metrics documentation refers to legacy implementation DBZ-3572

    • Update downstream MySQL doc to reference streaming metrics vs. binlog metrics DBZ-3582

    • No viable alternative at input "add COLUMN optional" DBZ-3586

    • NPE when OracleValueConverters get unsupported jdbc type DBZ-3587

    • SelectLobParser throws NullPointerException when parsing SQL for an unknown table DBZ-3591

    • Pulsar sink tries to convert null key to string DBZ-3595

    • Oracle RAC URL does not correctly substitute node IP addresses DBZ-3599

    • Oracle Connector - got InputMismatchException mismatched input 'CASCADE' expecting {'AS', 'PURGE', ';'} DBZ-3606

    Other changes

    • Unsupported column types should be ignored as with other connectors DBZ-814

    • Make outbox extensions dependency on tracing extension optional DBZ-2834

    • Avoid copying in DML handling DBZ-3328

    • Document impact of using --hostname when starting Connect container DBZ-3466

    • Update external link to AMQ Streams documentation DBZ-3502

    • Update external links in downstream docs to AMQ Streams deployment information DBZ-3525

    • Debezium Server Core builds plugin artifact DBZ-3542

    • List contributors script fails when name contains a "/" character DBZ-3544

    • Upgrade to Quarkus 2.0.0.CR3 DBZ-3550

    • Reduce DB round-trips for LOB handling DBZ-3556

    • Oracle benchmark does not execute LogMiner parser performance tests DBZ-3560

    • Clarify purpose of database.history.retention.hours DBZ-3565

    • Improve documentation related to signalling table DDL DBZ-3568

    • cassandra-driver-core 3.5.0 managed in Debezium BOM too old for testcontainers 1.15.3 DBZ-3589

    • Remove some dead code in Postgres connector DBZ-3596

    • Debezium server sink oracle database to pulsar without default namespace "public/default" DBZ-3601

    • Document OffsetContext.incrementalSnapshotEvents() DBZ-3607

    • Database skipping logic isn’t correct DBZ-3608

    Release 1.6.0.Beta1 (May 20th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    RENAME TABLE statement with multiple tables now emits multiple schema change events, one for each of the renamed tables (DBZ-3399).

    New Features

    • Support ad hoc snapshots on MySQL connector DBZ-66

    • Support DDL operations DBZ-2916

    • Add support for RAW, LONG, LONG RAW, BLOB, and CLOB data types DBZ-2948

    • Update Doc For Cassandra Connector DBZ-3092

    • Document log.mining.strategy for Oracle connector DBZ-3393

    • Update DOC with the new NUM_OF_CHANGE_EVENT_QUEUES parameter DBZ-3480

    • Use date format model that does not depend on client NLS settings in integration tests DBZ-3482

    • Provide Japanese translation of README.md DBZ-3503

    • Better handling of invalid SQL Server connector configuration DBZ-3505

    • Allow table.include.list and table.exclude.list to be updated after a connector is created DBZ-1263

    • Allow retry when SQL Server is down temporarily DBZ-3339

    Fixes

    • Database name should not be converted to lower case if tablenameCaseInsensitive=True in Oracle Connector DBZ-2203

    • Not able to configure Debezium Server via smallrye/microprofile environment variables DBZ-2622

    • Upgrading from debezium 1.2.2 to 1.4.0 stopped snapshotting new tables DBZ-2944

    • oracle logminer cannot add duplicate logfile DBZ-3266

    • Oracle connector does not correctly handle partially committed transactions DBZ-3322

    • Data loss when MongoDB snapshot take longer than the Oplog Window DBZ-3331

    • First online log query does not limit results to those that are available. DBZ-3332

    • Connector crashing after running for some time DBZ-3377

    • Broken links in downstream Monitoring chapter DBZ-3408

    • Broken links in User guide table of routing SMT configuration options DBZ-3410

    • Broken link to basic configuration example in downstream content-based routing topic DBZ-3412

    • Cassandra connector does not react on schema changes properly DBZ-3417

    • Debezium mapped diagnostic contexts doesn’t work DBZ-3438

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Timezone difference not considered in LagFromSourceInMilliseconds calculation DBZ-3456

    • "Found null value for non-optional schema" error when issuing TRUNCATE from Postgres on a table with a PK DBZ-3469

    • Connector crashes when table name contains '-' character DBZ-3485

    • Kafka Clients in Debezium Server is not aligned with Debezium Kafka version DBZ-3498

    • ReadToInsertEvent SMT needs to set ConfigDef DBZ-3508

    • Debezium configuration can be modified after instantiation DBZ-3514

    • Oracle redo log switch not detected when using multiple archiver process threads DBZ-3516

    • Cannot enable binlog streaming when INITIAL_ONLY snapshot mode configured DBZ-3529

    • Missing schema function in DDL Parser DBZ-3543

    • Retry logic for "No more data to read from socket" is too strict DBZ-3472

    Other changes

    • Document new source block and fix formatting issues DBZ-1614

    • Re-connect after "too many connections" DBZ-2300

    • Modularize doc for MongoDB component DBZ-2334

    • Rebase Postgres snapshot modes on exported snapshots DBZ-2337

    • Enable continuous JFR recording DBZ-3082

    • Remove deprecated Oracle connector option "database.tablename.case.insensitive" DBZ-3240

    • Improve Oracle redo logs query to avoid de-duplication step DBZ-3256

    • Migrate Jenkins CI to OCP 4.0 in PSI cloud DBZ-3396

    • Remove Antlr-based DML Parser DBZ-3400

    • Update Oracle driver version DBZ-3460

    • Incremental snapshot follow-up tasks DBZ-3500

    • Unnecessary NPE due to autoboxing DBZ-3519

    • Upgrade actions/cache to v2 version for formatting check DBZ-3520

    • Improve documentation for Oracle supplemental logging requirements DBZ-3521

    • SignalsIT leave table artifacts that cause other tests to fail DBZ-3533

    • Mark xstream dependency as provided DBZ-3539

    • Add test for Oracle table without PK DBZ-832

    Release 1.6.0.Alpha1 (May 6th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    Debezium now requires Java 11 as a build and a runtime environment (DBZ-2875). The only exception is Debezium Cassandra connector that still uses Java 8.

    New Features

    • Sink adapter for Apache Kafka DBZ-3382

    • Optimisation on MongoDB and MySQL connector for skipped.operations DBZ-3403

    • Incremental snapshotting DBZ-3473

    Fixes

    • io.debezium.text.ParsingException: no viable alternative at input 'IDNUMBER(4)GENERATEDBY' DBZ-1721

    • SKIPPED_OPERATIONS is added to CommonConnectorConfig.CONFIG_DEFINITION although it’s not implemented in all connectors DBZ-2699

    • Snapshot fails when reading TIME, DATE, DATETIME fields in mysql from ResultSet DBZ-3238

    • Update to fabric8 kube client 5.x DBZ-3349

    • An exception in resolveOracleDatabaseVersion if system language is not English DBZ-3397

    • Change strimzi branch in jenkins openshift-test job to main DBZ-3404

    • Broken link in downstream Monitoring chapter 7.3 DBZ-3409

    • Broken link in content-based routing chapter to page for downloading the SMT scripting archive DBZ-3411

    • LogMinerDmlParser mishandles double single quotes in WHERE clauses DBZ-3413

    • Incorrectly formatted links in downstream automatic topic creation doc DBZ-3414

    • SMT acronym incorrectly expanded in Debezium User Guide DBZ-3415

    • MariaDB — support privilege DDL in parser DBZ-3422

    • Change oc apply in jenkins openshift-test job to oc create DBZ-3423

    • SQL Server property (snapshot.select.statement.overrides) only matches 1st entry if comma-separated list also contains spaces DBZ-3429

    • Permission issue when running docker-compose or docker build as user not having uid 1001 DBZ-3453

    • no viable alternative at input 'DROP TABLE IF EXISTS group' (Galera and MariaDB) DBZ-3467

    • Debezium MySQL connector does not process tables with partitions DBZ-3468

    • The building tools' version in README doc is outdated DBZ-3478

    • MySQL DATE default value parser rejects timestamp DBZ-3497

    • MySQL8 GRANT statement not parsable DBZ-3499

    Other changes

    • Config validation for Db2 DBZ-3118

    • Add smoke test for UI DBZ-3133

    • Create new metric "CapturedTables" DBZ-3161

    • Handle deadlock issue for MySql build stuck for 6h DBZ-3233

    • Document using Connect REST API for log level changes DBZ-3270

    • User Guide corrections for SQL Server connector DBZ-3297

    • User Guide corrections for Db2 connector DBZ-3298

    • User Guide corrections for MySQL connector DBZ-3299

    • User Guide corrections for MongoDB connector DBZ-3300

    • Allow building the Oracle connector on CI DBZ-3365

    • Add tests for Protobuf Converter DBZ-3369

    • Use current SQL Server container image for testing and examples DBZ-3379

    • Reword prereq in downstream SQL Server connector doc DBZ-3392

    • Duplicate entry in MySQL connector properties table for mysql-property-skipped-operations DBZ-3402

    • Docs clarification around tombstone events DBZ-3416

    • Validate logical server name contains only alpha-numerical characters DBZ-3427

    • Provide a "quick" build profile DBZ-3449

    • Avoid warning about superfluous exclusion during packaging DBZ-3458

    • Upgrade binlog client DBZ-3463

\ No newline at end of file
+ Release Notes for Debezium 1.6

    Release Notes for Debezium 1.6

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.6.4.Final (December 1st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • DML statement couldn’t be parsed DBZ-4194

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • DDL statement couldn’t be parsed DBZ-4224

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    Other changes

    • Document incremental chunk size setting DBZ-4127

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    Release 1.6.3.Final (October 21st, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Oracle - Provide a more user-friendly way to update SCN DBZ-3876

    Fixes

    • Status stays in RUNNING for Postgres Connector after Postgres is stopped DBZ-3655

    • Oracle connector unable to start in archive only mode DBZ-3712

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • DML statement couldn’t be parsed DBZ-3892

    • Oracle Connector replicating data from all PDBs. Missing PDB filter during replication. DBZ-3954

    • Oracle connector Parsing Exception: DDL statement couldn’t be parsed DBZ-3962

    • Oracle-Connector fails parsing a DDL statement DBZ-3977

    • Oracle connector fails after error ORA-01327 DBZ-4010

    • Incorrect incremental snapshot DDL triggers snapshot that generates unending* inserts against signalling table DBZ-4013

    • Oracle-Connector fails parsing a DDL statement (truncate partition) DBZ-4017

    • DDL statement couldn’t be parsed DBZ-4026

    • Question about handling Raw column types DBZ-4037

    • Incremental snapshotting of a table can be prematurely terminated after restart DBZ-4057

    • Events are missed with Oracle connector due to LGWR buffer not being flushed to redo logs DBZ-4067

    • Database history is constantly being reconfigured DBZ-4106

    • Debezium deals with Oracle DDL appeared IndexOutOfBoundsException: Index: 0, Size: 0 DBZ-4135

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    Other changes

    • Hyper-link references between options in the Outbox SMT options table DBZ-3920

    • Incorrect documentation for message.key.columns DBZ-3437

    • Promote Outbox SMT to GA DBZ-3584

    • Upgrade to binlog-client 0.25.3 DBZ-3787

    • Document awareness of Oracle database tuning DBZ-3880

    • Update antora.yml file with new values for SMT attributes DBZ-3922

    • Add top-level Transformation menu node for downstream docs DBZ-3931

    • Update docs to specify that connectors track metadata only for transactions that occur after deployment DBZ-3961

    • Remove GRANT ALTER ANY TABLE from Oracle documentation DBZ-4007

    • Misc. MongoDB connector docs fixes DBZ-4149

    Release 1.6.2.Final (September 2nd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.1 and has been tested with version 2.7.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Broken links in Avro and Outbox Event Router documentation DBZ-3430

    • Oracle LogMiner DdlParser Error DBZ-3723

    • DDL statement couldn’t be parsed DBZ-3755

    • Debezium Oracle connector stops with DDL parsing error DBZ-3759

    • Debezium snapshot.select.statement.overrides overrides not used DBZ-3760

    • Server name pattern is unnecessarily restrictive. DBZ-3765

    • Exception thrown from getTableColumnsFromDatabase DBZ-3769

    • Crash when processing MySQL 5.7.28 TIME fields returns empty blob instead of null DBZ-3773

    • Add DEFAULT to partition option engine DBZ-3784

    • Initiating MongoDB connector causes oplog table scan DBZ-3788

    • SRCFG00014: The config property debezium.sink.pravega.scope is required but it could not be found in any config source DBZ-3792

    • Debezium 1.6.1 expecting database.port even when database.url is provided in config. DBZ-3813

    • Postgres numeric default value throwing exception DBZ-3816

    • Snapshot locking mode "minimal_percona" incorrectly resets transaction & isolation state DBZ-3838

    • Typo with prodname asciidoc attribute usage DBZ-3856

    • Oracle unparsable DDL issue DBZ-3877

    • Support AS clause in GRANT statement DBZ-3878

    • Error Parsing Oracle DDL dropping PK DBZ-3886

    • EMPTY_CLOB() and EMPTY_BLOB() should be treated as empty LOB values DBZ-3893

    • Oracle DDL parsing issue DBZ-3896

    • DEBEZIUM producer stops unexpectedly trying to change column in table which does not exist DBZ-3898

    • "binary.handling.mode": "hex" setting works incorrectly for values with trailing zeros DBZ-3912

    • Incorrect validation of truncate handling mode DBZ-3935

    Other changes

    • Discuss SMT predicates in docs DBZ-3227

    • Adjust to changed Strimzi CRDs DBZ-3385

    • Clarify lifecycle of snapshot metrics DBZ-3613

    • Error in description of the property column.mask.hash.hashAlgorithm.with.salt.salt DBZ-3802

    • Improperly constructed links generating downstream build errors DBZ-3858

    • Extract new top-level menu node for SMTs DBZ-3873

    • Upgrade to Jackson Databind version 2.10.5.1 DBZ-3927

    • Upgrade ZooKeeper in 1.6 container image DBZ-3950

    • Upgrade Kafka container image to 2.7.1 DBZ-3956

    • Performance issue due to inefficient ObjectMapper initialization DBZ-3770

    Release 1.6.1.Final (July 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    There are no new features in the release.

    Fixes

    • Support invisible columns with MySql 8.0.23+ DBZ-3623

    • Db2Connector is unable to establish validation connection DBZ-3632

    • MySQL Connector error after execute a "create role" statement DBZ-3686

    • Error parsing query, even with database.history.skip.unparseable.ddl DBZ-3717

    • Support for TABLE_TYPE missing from MySQL grammar DBZ-3718

    • Debezium mysql connector plugin throws SQL syntax error during incremental snapshot DBZ-3725

    Other changes

    • Add documentation about new capturing implementation for the MySQL connector to downstream product DBZ-3140

    • Doc clarification on connector rewrite DBZ-3711

    • Update Oracle connector deployment instructions for consistency DBZ-3772

    Release 1.6.0.Final (June 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Allow specifying of Oracle archive log location DBZ-3661

    Fixes

    • Fix connect container build to be compatible with Oracle Instant Client DBZ-2547

    • Schema change events of excluded databases are discarded DBZ-3622

    • Provide a descriptive error when enabling log.mining.archive.log.only.mode with an offset SCN that isn’t yet in an archive log. DBZ-3665

    • When LOB support is disabled, use legacy SCN mining algorithm DBZ-3676

    Other changes

    • Oracle connector error with tables using unique index keys: "key must not be null" DBZ-1211

    • Database history properties missing in connector docs DBZ-3459

    • Oracle connector doc fixes DBZ-3662

    • Change the reached max batch size log message to DEBUG level DBZ-3664

    • Remove unused code DBZ-3672

    • Update deprecated config for debezium smt DBZ-3673

    • Align Antlr versions used during testing DBZ-3675

    Release 1.6.0.CR1 (June 24th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Implement SKIPPED_OPERATIONS for SQLServer DBZ-2697

    • Handling database connection timeout during schema recovery DBZ-3615

    • Scope mined DDL events to include/exclude lists if provided DBZ-3634

    • Support heartbeats during periods of low change event activity DBZ-3639

    Fixes

    • Fix exception on not found table DBZ-3523

    • Transaction commit event dispatch fails if no active transaction in progress. DBZ-3593

    • Additional unique index referencing columns not exposed by CDC causes exception DBZ-3597

    • GRANT/REVOKE for roles is not working DBZ-3610

    • ParsingException for ALTER TABLE against a table that is unknown to the connector. DBZ-3612

    • Oracle connector continually logging warnings about already processed transactions. DBZ-3616

    • StringIndexOutOfBoundsException thrown while handling UTF-8 characters DBZ-3618

    • DDL ParsingException - "SUPPLEMENTAL LOG DATA (UNIQUE INDEX) COLUMNS" DBZ-3619

    • Oracle transaction reconciliation fails to lookup primary key columns if UPDATE sets columns to only NULL DBZ-3631

    • Oracle DDL parser fails on CREATE TABLE: mismatched input 'maxtrans' expecting {'AS', ';'} DBZ-3641

    • Antlr version mismatch DBZ-3646

    • SQL Agent does not start in SqlServer image when deployed to openshift DBZ-3648

    • Java UBI image is lacking gzip utility DBZ-3659

    Other changes

    • Upgrade to Apicurio Registry 2.0 DBZ-3171

    • Vitess: rename "master" branch to "main" DBZ-3275

    • Formatting updates to correct errors in documentation builds DBZ-3518

    • Prepare test-suite for Kafka on RHEL DBZ-3566

    • Upgrade to Quarkus 2.0.0.Final DBZ-3602

    • Some dependencies are broken in ocp testsuite after BOM introduction DBZ-3625

    • Handle updated json schema for connector passwords DBZ-3637

    • MySQL SourceInfo should be public DBZ-3638

    • Change CLOB/BLOB data type support to an opt-in feature DBZ-3645

    • Denote BLOB support as incubating DBZ-3651

    Release 1.6.0.Beta2 (June 10th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    There are no breaking changes in the release.

    New Features

    • Clarification on MySQL vs MariaDb Usage DBZ-1145

    • Pravega sink for Debezium Server DBZ-3546

    • Postgres - Column default values are not extracted DBZ-2790

    • Add support for snapshot.include.collection.list DBZ-3062

    • Apply filters with empty filter changes 'Exclude' selection to 'Include' DBZ-3102

    • Adjust OpenShift tests to support new version of Strimzi CRDs DBZ-3475

    • Remove SchemaProcessor From Cassandra Connector DBZ-3506

    • Provide a snapshot.locking.mode option for Oracle DBZ-3557

    • Implement support for JSON function in MySQL parser DBZ-3559

    Fixes

    • AbstractConnectorTest should work in environment with longer latency DBZ-400

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-3068

    • SQL Server connector buffers all CDC events in memory if more than one table is captured DBZ-3486

    • SQLServer low throughput tables increase usage of TempDB DBZ-3515

    • Incorrectly identifies primary member of replica set DBZ-3522

    • Cannot enable binlog streaming when INITIAL_ONLY snapshot mode configured DBZ-3529

    • Connector CRD name and database.server.name cannot use the same value in OCP test-suite DBZ-3538

    • SelectLobParser checks for lowercase "is null" instead of uppercase "IS NULL" DBZ-3545

    • DDL ParsingException "mismatched input 'sharing'" for create table syntax. DBZ-3549

    • DDL ParsingException on alter table DBZ-3554

    • ORA-00310 when online redo log is archived and replaced by redo log with new sequence DBZ-3561

    • Server name pattern is unnecessarily restrictive DBZ-3562

    • ORA-01289 error encountered on Oracle RAC when multiple logs are mined with same sequence number DBZ-3563

    • MySQL metrics documentation refers to legacy implementation DBZ-3572

    • Update downstream MySQL doc to reference streaming metrics vs. binlog metrics DBZ-3582

    • No viable alternative at input "add COLUMN optional" DBZ-3586

    • NPE when OracleValueConverters get unsupported jdbc type DBZ-3587

    • SelectLobParser throws NullPointerException when parsing SQL for an unknown table DBZ-3591

    • Pulsar sink tries to convert null key to string DBZ-3595

    • Oracle RAC URL does not correctly substitute node IP addresses DBZ-3599

    • Oracle Connector - got InputMismatchException mismatched input 'CASCADE' expecting {'AS', 'PURGE', ';'} DBZ-3606

    Other changes

    • Unsupported column types should be ignored as with other connectors DBZ-814

    • Make outbox extensions dependency on tracing extension optional DBZ-2834

    • Avoid copying in DML handling DBZ-3328

    • Document impact of using --hostname when starting Connect container DBZ-3466

    • Update external link to AMQ Streams documentation DBZ-3502

    • Update external links in downstream docs to AMQ Streams deployment information DBZ-3525

    • Debezium Server Core builds plugin artifact DBZ-3542

    • List contributors script fails when name contains a "/" character DBZ-3544

    • Upgrade to Quarkus 2.0.0.CR3 DBZ-3550

    • Reduce DB round-trips for LOB handling DBZ-3556

    • Oracle benchmark does not execute LogMiner parser performance tests DBZ-3560

    • Clarify purpose of database.history.retention.hours DBZ-3565

    • Improve documentation related to signalling table DDL DBZ-3568

    • cassandra-driver-core 3.5.0 managed in Debezium BOM too old for testcontainers 1.15.3 DBZ-3589

    • Remove some dead code in Postgres connector DBZ-3596

    • Debezium server sink oracle database to pulsar without default namespace "public/default" DBZ-3601

    • Document OffsetContext.incrementalSnapshotEvents() DBZ-3607

    • Database skipping logic isn’t correct DBZ-3608

    Release 1.6.0.Beta1 (May 20th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our docker images then do not forget to pull them fresh from Docker registry.

    Breaking changes

    RENAME TABLE statement with multiple tables now emits multiple schema change events, one for each of the renamed tables (DBZ-3399).

    New Features

    • Support ad hoc snapshots on MySQL connector DBZ-66

    • Support DDL operations DBZ-2916

    • Add support for RAW, LONG, LONG RAW, BLOB, and CLOB data types DBZ-2948

    • Update Doc For Cassandra Connector DBZ-3092

    • Document log.mining.strategy for Oracle connector DBZ-3393

    • Update DOC with the new NUM_OF_CHANGE_EVENT_QUEUES parameter DBZ-3480

    • Use date format model that does not depend on client NLS settings in integration tests DBZ-3482

    • Provide Japanese translation of README.md DBZ-3503

    • Better handling of invalid SQL Server connector configuration DBZ-3505

    • Allow table.include.list and table.exclude.list to be updated after a connector is created DBZ-1263

    • Allow retry when SQL Server is down temporarily DBZ-3339

    Fixes

    • Database name should not be converted to lower case if tablenameCaseInsensitive=True in Oracle Connector DBZ-2203

    • Not able to configure Debezium Server via smallrye/microprofile environment variables DBZ-2622

    • Upgrading from debezium 1.2.2 to 1.4.0 stopped snapshotting new tables DBZ-2944

    • oracle logminer cannot add duplicate logfile DBZ-3266

    • Oracle connector does not correctly handle partially committed transactions DBZ-3322

    • Data loss when MongoDB snapshot take longer than the Oplog Window DBZ-3331

    • First online log query does not limit results to those that are available. DBZ-3332

    • Connector crashing after running for some time DBZ-3377

    • Broken links in downstream Monitoring chapter DBZ-3408

    • Broken links in User guide table of routing SMT configuration options DBZ-3410

    • Broken link to basic configuration example in downstream content-based routing topic DBZ-3412

    • Cassandra connector does not react on schema changes properly DBZ-3417

    • Debezium mapped diagnostic contexts doesn’t work DBZ-3438

    • source.timestamp.mode=commit imposes a significant performance penalty DBZ-3452

    • Timezone difference not considered in LagFromSourceInMilliseconds calculation DBZ-3456

    • "Found null value for non-optional schema" error when issuing TRUNCATE from Postgres on a table with a PK DBZ-3469

    • Connector crashes when table name contains '-' character DBZ-3485

    • Kafka Clients in Debezium Server is not aligned with Debezium Kafka version DBZ-3498

    • ReadToInsertEvent SMT needs to set ConfigDef DBZ-3508

    • Debezium configuration can be modified after instantiation DBZ-3514

    • Oracle redo log switch not detected when using multiple archiver process threads DBZ-3516

    • Cannot enable binlog streaming when INITIAL_ONLY snapshot mode configured DBZ-3529

    • Missing schema function in DDL Parser DBZ-3543

    • Retry logic for "No more data to read from socket" is too strict DBZ-3472

    Other changes

    • Document new source block and fix formatting issues DBZ-1614

    • Re-connect after "too many connections" DBZ-2300

    • Modularize doc for MongoDB component DBZ-2334

    • Rebase Postgres snapshot modes on exported snapshots DBZ-2337

    • Enable continuous JFR recording DBZ-3082

    • Remove deprecated Oracle connector option "database.tablename.case.insensitive" DBZ-3240

    • Improve Oracle redo logs query to avoid de-duplication step DBZ-3256

    • Migrate Jenkins CI to OCP 4.0 in PSI cloud DBZ-3396

    • Remove Antlr-based DML Parser DBZ-3400

    • Update Oracle driver version DBZ-3460

    • Incremental snapshot follow-up tasks DBZ-3500

    • Unnecessary NPE due to autoboxing DBZ-3519

    • Upgrade actions/cache to v2 version for formatting check DBZ-3520

    • Improve documentation for Oracle supplemental logging requirements DBZ-3521

    • SignalsIT leave table artifacts that cause other tests to fail DBZ-3533

    • Mark xstream dependency as provided DBZ-3539

    • Add test for Oracle table without PK DBZ-832

    Release 1.6.0.Alpha1 (May 6th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.6.0.Alpha1 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.6.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.6.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    Debezium now requires Java 11 as both a build-time and a runtime environment (DBZ-2875). The only exception is the Debezium Cassandra connector, which still uses Java 8.

    New Features

    • Sink adapter for Apache Kafka DBZ-3382

    • Optimisation on MongoDB and MySQL connector for skipped.operations DBZ-3403

    • Incremental snapshotting DBZ-3473

    Fixes

    • io.debezium.text.ParsingException: no viable alternative at input 'IDNUMBER(4)GENERATEDBY' DBZ-1721

    • SKIPPED_OPERATIONS is added to CommonConnectorConfig.CONFIG_DEFINITION although it’s not implemented in all connectors DBZ-2699

    • Snapshot fails when reading TIME, DATE, DATETIME fields in mysql from ResultSet DBZ-3238

    • Update to fabric8 kube client 5.x DBZ-3349

    • An exception in resolveOracleDatabaseVersion if system language is not English DBZ-3397

    • Change strimzi branch in jenkins openshift-test job to main DBZ-3404

    • Broken link in downstream Monitoring chapter 7.3 DBZ-3409

    • Broken link in content-based routing chapter to page for downloading the SMT scripting archive DBZ-3411

    • LogMinerDmlParser mishandles double single quotes in WHERE clauses DBZ-3413

    • Incorrectly formatted links in downstream automatic topic creation doc DBZ-3414

    • SMT acronym incorrectly expanded in Debezium User Guide DBZ-3415

    • MariaDB — support privilege DDL in parser DBZ-3422

    • Change oc apply in jenkins openshift-test job to oc create DBZ-3423

    • SQL Server property (snapshot.select.statement.overrides) only matches 1st entry if comma-separated list also contains spaces DBZ-3429

    • Permission issue when running docker-compose or docker build as user not having uid 1001 DBZ-3453

    • no viable alternative at input 'DROP TABLE IF EXISTS group' (Galera and MariaDB) DBZ-3467

    • Debezium MySQL connector does not process tables with partitions DBZ-3468

    • The building tools' version in README doc is outdated DBZ-3478

    • MySQL DATE default value parser rejects timestamp DBZ-3497

    • MySQL8 GRANT statement not parsable DBZ-3499

    Other changes

    • Config validation for Db2 DBZ-3118

    • Add smoke test for UI DBZ-3133

    • Create new metric "CapturedTables" DBZ-3161

    • Handle deadlock issue for MySql build stuck for 6h DBZ-3233

    • Document using Connect REST API for log level changes DBZ-3270

    • User Guide corrections for SQL Server connector DBZ-3297

    • User Guide corrections for Db2 connector DBZ-3298

    • User Guide corrections for MySQL connector DBZ-3299

    • User Guide corrections for MongoDB connector DBZ-3300

    • Allow building the Oracle connector on CI DBZ-3365

    • Add tests for Protobuf Converter DBZ-3369

    • Use current SQL Server container image for testing and examples DBZ-3379

    • Reword prereq in downstream SQL Server connector doc DBZ-3392

    • Duplicate entry in MySQL connector properties table for mysql-property-skipped-operations DBZ-3402

    • Docs clarification around tombstone events DBZ-3416

    • Validate logical server name contains only alpha-numerical characters DBZ-3427

    • Provide a "quick" build profile DBZ-3449

    • Avoid warning about superfluous exclusion during packaging DBZ-3458

    • Upgrade binlog client DBZ-3463

    \ No newline at end of file diff --git a/releases/1.7/index.html b/releases/1.7/index.html index 18ab45fc55..e3641da11c 100644 --- a/releases/1.7/index.html +++ b/releases/1.7/index.html @@ -1 +1 @@ - Debezium Release Series 1.7

    stable

    Tested Versions

    Java: 11+
    Kafka Connect: 1.x, 2.x
    MySQL: database 5.7, 8.0.x; driver 8.0.26
    MongoDB: database 3.2, 3.4, 3.6, 4.0, 4.2; driver 4.2.1
    PostgreSQL: database 9.6, 10, 11, 12, 13; plug-ins decoderbufs, wal2json, pgoutput; driver 42.2.22
    Oracle: database 12c, 19c; drivers 12.2.0.1, 19.8.0.0, 21.1.0.0
    SQL Server: database 2017, 2019; driver 7.2.2.jre8
    Cassandra: database 3.11.4; driver 3.11.0
    Db2: database 11.5; driver 11.5.0.0
    Vitess: database 8.0.x*, 9.0.x; driver 9.0.0
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    1.7.2.Final

    2021-12-14
    Optional placeholder for unavailable CLOB/LOB in Oracle; MySQL driver 8.0.27; Speed of Event Hubs sink improved; Mitigation of CVE-2021-4104 and CVE-2019-17571
    \ No newline at end of file diff --git a/releases/1.7/release-notes.html b/releases/1.7/release-notes.html index b1753ab7e8..07719dca76 100644 --- a/releases/1.7/release-notes.html +++ b/releases/1.7/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 1.7

    Release Notes for Debezium 1.7

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.7.2.Final (December 14th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.1 and has been tested with version 2.8.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.2.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.2.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    The Oracle connector now inserts a placeholder value for unavailable CLOB/BLOB columns (DBZ-4276).

    New Features

    • Support passing an unavailable placeholder value for CLOB/BLOB column types DBZ-4276

    Fixes

    • None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) DBZ-3635

    • Postgres testsuite hangs on PostgresConnectorIT#exportedSnapshotShouldNotSkipRecordOfParallelTx DBZ-4081

    • Container images for Apache Kafka and ZooKeeper fail to start up DBZ-4160

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • Debezium Metrics not being set correctly DBZ-4222

    • DDL statement couldn’t be parsed DBZ-4224

    • Exception ORA-00310 is not gracefully handled during streaming DBZ-4230

    • Reduce verbosity of logging Oracle memory metrics DBZ-4255

    • Signal based incremental snapshot is failing when launched right after a schema change DBZ-4272

    • [Debezium Server] Event Hubs exporter slow/Event data was too large DBZ-4277

    • ParsingException: DDL statement couldn’t be parsed DBZ-4280

    • some data type is not working for sending signals to a Debezium connector DBZ-4298

    • NullPointerException may be thrown when validating table and column lengths DBZ-4308

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    • support for JSON function in MySQL index DBZ-4320

    • Avoid holding table metadata lock in read-only incremental snapshots DBZ-4331

    • Incremental Snapshot does not pick up table DBZ-4343

    • DDL couldn’t be parsed: 'analyze table schema.table estimate statistics sample 5 percent;' DBZ-4396

    • Xstream support with LOB unavailable value placeholder support is inconsistent DBZ-4422

    • Oracle Infinispan buffer fails to serialize unavailable value placeholders DBZ-4425

    Other changes

    • Upgrade to Apicurio Registry 2.0 (QE, docs) DBZ-3629

    • Misc. documentation changes for the Debezium MySQL connector DBZ-3974

    • Promote Outbox SMT to GA DBZ-4012

    • Document incremental chunk size setting DBZ-4127

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    • OracleClobDataTypeIT shouldNotStreamAnyChangesWhenLobEraseIsDetected may fail randomly DBZ-4384

    • Optionally assemble Oracle connector distribution without Infinispan DBZ-4446

    • Drop JMS Appender class during container build DBZ-4447

    Release 1.7.1.Final (November 5th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.1 and has been tested with version 2.8.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.1.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.1.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Store buffered events in separate Infinispan cache DBZ-4159

    • Log warning when table/column name exceeds maximum allowed by LogMiner DBZ-4161

    • Process transaction started/committed in MySQL read-only incremental snapshot DBZ-4197

    Fixes

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • Cassandra UUID handling DBZ-3885

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database DBZ-4064

    • Debezium Server might contain driver versions pulled from Quarkus DBZ-4070

    • Database history is constantly being reconfigured DBZ-4106

    • Oracle flush table should not contain multiple rows DBZ-4118

    • SQL Server Connector fails to wrap in flat brackets DBZ-4125

    • Oracle Connector DDL Parsing Exception DBZ-4126

    • Debezium deals with Oracle DDL appeared IndexOutOfBoundsException: Index: 0, Size: 0 DBZ-4135

    • Error: PostgresDefaultValueConverter - Cannot parse column default value 'NULL::numeric' to type 'numeric'. Expression evaluation is not supported. DBZ-4137

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    • Producer failure NullPointerException DBZ-4166

    • DDL statement couldn’t be parsed - Modify Column DBZ-4174

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • Certain LogMiner-specific tests are not being skipped while using Xstreams DBZ-4188

    • DML statement couldn’t be parsed DBZ-4194

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • Support MySQL Dual Passwords in DDL Parser DBZ-4215

    • CREATE PROCEDURE DDL throws ParsingException DBZ-4229

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    Other changes

    • Incorrect documentation for message.key.columns DBZ-3437

    • Document awareness of Oracle database tuning DBZ-3880

    • Please fix vulnerabilites DBZ-3926

    • Handle SCN gap DBZ-4036

    • Upgrade to Apache Kafka 2.8.1 DBZ-4108

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Misc. MongoDB connector docs fixes DBZ-4149

    • Document Oracle buffering solutions DBZ-4157

    Release 1.7.0.Final (September 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    For the incubating Debezium Cassandra connector, the field format emitted for UUID columns has changed: instead of a base64 representation like "AA4AAFcZEqMAAAAAKDJ9Sg==", the more common UUID string representation is now used, such as "000e0000-5719-12a3-0000-000028327d4a" (DBZ-3885).

    New Features

    • DBZ-UI - Provide list of configurations DBZ-3960

    • add ProtobufConverter for Cassandra CDC DBZ-3906

    Fixes

    • java.lang.RuntimeException: com.microsoft.sqlserver.jdbc.SQLServerException: The connection is closed DBZ-3346

    • Oracle connector unable to start in archive only mode DBZ-3712

    • DDL statement couldn’t be parsed DBZ-4026

    • Question about handling Raw column types DBZ-4037

    • Fixing wrong log dir location in Kafka container image DBZ-4048

    • Incremental snapshotting of a table can be prematurely terminated after restart DBZ-4057

    • Documentation - Setting up Db2 - Step 10 (Start the ASN agent) is not accurate DBZ-4044

    • Debezium Server uses MySQL driver version as defined in Quarkus not in Debezium DBZ-4049

    • Events are missed with Oracle connector due to LGWR buffer not being flushed to redo logs DBZ-4067

    • Postgres JDBC Driver version causes connection issues on some cloud Postgres instances DBZ-4060

    • nulls for some MySQL properties in the connector-types backend response DBZ-3108

    Other changes

    • Oracle IncrementalSnapshotIT invalid table test fails DBZ-4040

    • Document how to enable schema for JSON messages DBZ-4041

    • Trigger contributor check action only when PR is opened DBZ-4058

    • Provide JMH benchmark for ChangeEventQueue DBZ-4050

    • Commit message action fails for multi-line commit messages DBZ-4047

    Release 1.7.0.CR2 (September 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.CR2 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.CR2 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Support read-only MySQL connection in incremental snapshot DBZ-3577

    Fixes

    • Connection failure after snapshot wasn’t executed for a while DBZ-3951

    • Oracle-Connector fails parsing a DDL statement DBZ-3977

    • Oracle connector fails after error ORA-01327 DBZ-4010

    • Incorrect incremental snapshot DDL triggers snapshot that generates unending* inserts against signalling table DBZ-4013

    • Oracle-Connector fails parsing a DDL statement (truncate partition) DBZ-4017

    Other changes

    • Jenkins build node is based on RHEL 8.0 and requires upgrade DBZ-3690

    • Remove GRANT ALTER ANY TABLE from Oracle documentation DBZ-4007

    • Update deploy action configuration for v3 DBZ-4009

    • Website preview via surge.sh DBZ-4011

    • Automate contributor check in COPYRIGHT.txt DBZ-4023

    • Provide an example of usage of snapshot.select.statement.overrides DBZ-3603

    • Throughput Bottleneck and Inefficient Batching in ChangeEventQueue DBZ-3887

    • Performance Bottleneck in TableIdParser String Replacement DBZ-4015

    Release 1.7.0.CR1 (September 16th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.CR1 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.CR1 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    The container images for Apache ZooKeeper, Kafka and Connect are now based on a Fedora base image (DBZ-3939). This change was introduced to stay synchronized with the latest Java releases.

    A side effect of the Java upgrade is that the old, insecure TLS protocol versions TLSv1 and TLSv1.1 are disabled by default in the 1.7 container images. If your database cannot accept modern TLS connections, Debezium will throw an SSLHandshakeException and fail to connect.

    If a connection with older algorithms is necessary, then remove the entries "TLSv1" and/or "TLSv1.1" from the jdk.tls.disabledAlgorithms key in the following files of the Debezium container image for Kafka Connect:

    • /etc/crypto-policies/back-ends/java.config

    • /lib/jvm/jre/conf/security/java.security
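    If you do need the legacy protocols, one way to apply the change is to strip the two entries from the files listed above, either in a derived image or in the running Kafka Connect container (as root). The commands below are only a sketch: the exact contents and spacing of the jdk.tls.disabledAlgorithms line differ between Java builds, so inspect the files before editing, and prefer fixing the database's TLS configuration where possible.

    # Sketch only: drop TLSv1/TLSv1.1 from the disabled-algorithms list in the two
    # files named above; review the matches before relying on the sed pattern.
    for f in /etc/crypto-policies/back-ends/java.config \
             /lib/jvm/jre/conf/security/java.security; do
      grep -n 'jdk.tls.disabledAlgorithms' "$f"        # review the current value
      sed -i -e 's/TLSv1\.1, //g' -e 's/TLSv1, //g' "$f"
    done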

    New Features

    There are no new features in this release.

    Fixes

    • RedisStreamChangeConsumer - handleBatch - client.xadd should be wrapped with a try catch block DBZ-3713

    • Incorrect information in documentation about supplemental logging DBZ-3776

    • DML statement couldn’t be parsed DBZ-3892

    • DEBEZIUM producer stops unexpectedly trying to change column in table which does not exist DBZ-3898

    • "binary.handling.mode": "hex" setting works incorrectly for values with trailing zeros DBZ-3912

    • System test-suite is unable to work with unreleased Apicurio versions DBZ-3924

    • CI support for running Apicurio registry tests DBZ-3932

    • Incorrect validation of truncate handling mode DBZ-3935

    • protobuf decoder has sends unsigned long as signed for Postgres 13 DBZ-3937

    • Field#description() should return a proper java.lang.String when documentation/description is not set DBZ-3943

    • MySQL example image not working after upgrade to 8.0 DBZ-3944

    • Fix empty high watermark check DBZ-3947

    • Oracle Connector replicating data from all PDBs. Missing PDB filter during replication. DBZ-3954

    • Oracle connector Parsing Exception: DDL statement couldn’t be parsed DBZ-3962

    • FormSwitchComponent not working correctly in case of duplicate STM form DBZ-3968

    • Strings with binary collation shouldn’t be parsed as Types.BINARY by MySqlAntlrDdlParser. DBZ-3969

    • Openshift pods list image preview not found DBZ-3970

    • MySqlValueConvertes.java has typo DBZ-3976

    • Mysql-Connector fails parsing invalid decimal format DDL statement DBZ-3984

    • Connection Factory is not used when validating SQL Server Connector DBZ-4001

    Other changes

    • Promote Outbox SMT to GA DBZ-3584

    • Clarify lifecycle of snapshot metrics DBZ-3613

    • Explore on building non-core repos with corresponding PR branch of core repo and vice-versa DBZ-3748

    • Upgrade to binlog-client 0.25.3 DBZ-3787

    • RelationalSnapshotChangeEventSource should accept a RelationalDatabaseSchema DBZ-3818

    • Create GH Action that flags "octocat" commits DBZ-3822

    • Publish Maven repo with downstream artifacts DBZ-3861

    • CI preparation for Apicurio Registry downstream DBZ-3908

    • Specify branch name on push/pull_request step in all GH action workflows DBZ-3913

    • Consistently order releases from new to old on the website DBZ-3917

    • Update RELEASING.md DBZ-3918

    • Update antora.yml file with new values for SMT attributes DBZ-3922

    • Documentation update should not trigger staging workflow build DBZ-3923

    • Upgrade to Jackson Databind version 2.10.5.1 DBZ-3927

    • Add top-level Transformation menu node for downstream docs DBZ-3931

    • Docker image serving plugin artifacts over HTTP for new Strimzi deployment mechanism DBZ-3934

    • Upgrade MySQL example image to 8.0 DBZ-3936

    • Gracefully handle DB history file stored in a sym-linked directory DBZ-3958

    • Update docs to specify that connectors track metadata only for transactions that occur after deployment DBZ-3961

    • Update and automate Jenkis Node setup DBZ-3965

    • Hyper-link references between options in the Outbox SMT options table DBZ-3920

    • Generify exclusion of columns from snapshotting DBZ-2525

    • PoC for adding transformations / SMT steps to the Debezium UI DBZ-3698

    • Use No match found of pf Empty state component in filter page. DBZ-3888

    • Update the "Skip to review" implementation as per PF new documented standard design pattern DBZ-3916

    • Set up MongoDB 5.0 image DBZ-3973

    Release 1.7.0.Beta1 (August 25th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Beta1 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Beta1 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    The MySQL driver was updated to the latest version, 8.0.26 (DBZ-3833). This update brings new time zone handling and new configuration options.

    Detailed information can be found in the MySQL docs.

    New Features

    • Sink adapter for Nats Streaming DBZ-3815

    • Debezium Server’s run.sh startup script fails on msys or cygwin bash DBZ-3840

    • Upgrade Debezium Server Pravega sink to 0.9.1 DBZ-3841

    Fixes

    • Create example for using self-managed Debezium with MK DBZ-2947

    • Exception when validating field.exclude.list for Mongo DB connectors DBZ-3028

    • In case of /api/connectors/1 takes longer time(more than pooling) to fail spinner keeps on loading. DBZ-3313

    • SQL Server CDC event timestamps do not get converted to UTC DBZ-3479

    • Debezium snapshot.select.statement.overrides overrides not used DBZ-3760

    • Server name pattern is unnecessarily restrictive. DBZ-3765

    • Crash when processing MySQL 5.7.28 TIME fields returns empty blob instead of null DBZ-3773

    • Debezium UI and CDC DBZ-3781

    • Disable "Next" if any field value is changed after the validation. DBZ-3783

    • Add DEFAULT to partition option engine DBZ-3784

    • Initiating MongoDB connector causes oplog table scan DBZ-3788

    • SRCFG00014: The config property debezium.sink.pravega.scope is required but it could not be found in any config source DBZ-3792

    • LSN component of Postgres sequence numbers is not updated DBZ-3801

    • Debezium 1.6.1 expecting database.port even when database.url is provided in config. DBZ-3813

    • Postgres numeric default value throwing exception DBZ-3816

    • SQL Server connector doesn’t handle retriable errors during task start DBZ-3823

    • Debezium OpenShift integration test-suite failure DBZ-3824

    • Debezium Server Kinesis Sink Cannot Handle Null Events DBZ-3827

    • Timeout when reading from MongoDB oplog cannot be controlled DBZ-3836

    • Snapshot locking mode "minimal_percona" incorrectly resets transaction & isolation state DBZ-3838

    • Properly skip tests when minor/patch are not specified DBZ-3839

    • Truncate validation should verify key schema is null and not value schema DBZ-3842

    • System test-suite fails if CRD already exist within the cluster DBZ-3846

    • Incorrect test-tags for OcpAvroDB2ConnectorIT DBZ-3851

    • System test-suite CI job does not have RHEL image parameter DBZ-3852

    • Typo with prodname asciidoc attribute usage DBZ-3856

    • SQL Server Connector finds tables for streaming but not snapshot DBZ-3857

    • Signaling table id column too small in example DBZ-3867

    • Oracle unparsable DDL issue DBZ-3877

    • Support AS clause in GRANT statement DBZ-3878

    • Error Parsing Oracle DDL dropping PK DBZ-3886

    • Q3 docs referencing Service Registry 2.0 docs DBZ-3891

    • EMPTY_CLOB() and EMPTY_BLOB() should be treated as empty LOB values DBZ-3893

    • Oracle DDL parsing issue DBZ-3896

    Other changes

    • Debezium UI participating in upstream releases — follow-up DBZ-3169

    • Discuss SMT predicates in docs DBZ-3227

    • Test failure for SqlServerConnectorIT#excludeColumnWhenCaptureInstanceExcludesColumns DBZ-3228

    • Adjust to changed Strimzi CRDs DBZ-3385

    • Create a smoke test for Debezium with Kafka on RHEL DBZ-3387

    • Promote Debezium support on RHEL to GA DBZ-3406

    • Oracle Docs for TP DBZ-3407

    • Upgrade to Kafka 2.8 DBZ-3444

    • Update Debezium on RHEL documentation for GA DBZ-3462

    • Options in outbox router docs not linked DBZ-3649

    • Create Kafka related images based on UBI-8 for RHEL certification DBZ-3650

    • Error in description of the property column.mask.hash.hashAlgorithm.with.salt.salt DBZ-3802

    • Debezium does not provide up-to-date container images DBZ-3809

    • Change DBZ kafka image , so its start script can be used on QA Rhel kafka DBZ-3810

    • Test with Apicurio Registry 2.0 in system level test-suite DBZ-3812

    • Upgrade commons-compress from 1.20 to 1.21 DBZ-3819

    • Update jenkins job configuration to incorporate recent system-testsuite changes DBZ-3825

    • Test Failure - RecordsStreamProducerIT#testEmptyChangesProducesHeartbeat DBZ-3828

    • Upgrade UI proxy connectors to 1.6.1.Final DBZ-3837

    • Improperly constructed links generating downstream build errors DBZ-3858

    • CI Failure in VitessConnectorIT.shouldOutputRecordsInCloudEventsFormat DBZ-3863

    • CI Failure for StreamingSourceIT.shouldFailOnSchemaInconsistency DBZ-3869

    • Extract new top-level menu node for SMTs DBZ-3873

    • Introduce documentation variables for AMQ DBZ-3879

    • Don’t log error when dropping non-existent replication slot in tests DBZ-3889

    • Intermittent test failures on CI: VitessConnectorIT::shouldUseUniqueKeyAsRecordKey DBZ-3900

    • Intermittent test failures on CI: IncrementalSnapshotIT#updatesWithRestart DBZ-3901

    • Test shouldNotEmitDdlEventsForNonTableObjects randomly fails DBZ-3902

    • VOLUME instruction causes issue with recent Docker versions DBZ-3903

    • Provide ability to denote UI order in field metadata DBZ-3904

    • Make relocation.dir and offset.dir configs required. DBZ-2251

    • Create Debezium API Spec Generator and static API definitions for connectors DBZ-3364

    • Improve incremental snapshot metrics DBZ-3688

    • Import Pattern-fly CSS from @patternfly/patternfly DBZ-3779

    • Allow system testsuite to produce Strimzi image for arbitrary released version of Debezium DBZ-3826

    • PostgreSQL - Minor Performance bottleneck in PostgresChangeRecordEmitter DBZ-3870

    • Oracle - Provide a more user-friendly way to update SCN DBZ-3876

    • Test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent DBZ-2398

    • Test failure for SqlServerConnectorIT#EventProcessingFailureHandlingIT DBZ-3229

    • Remove underscore from Debezium Server NATS sink Java package name DBZ-3910

    • LogMinerDatabaseStateWriter causes a SQLException DBZ-3911

    • Maven release fails due to debezium-testing version handling DBZ-3909

    • Zookeeper image should not use archive.apache.org DBZ-3914

    Release 1.7.0.Alpha1 (July 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Alpha1 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Implement incremental snapshotting for Oracle DBZ-3692

    • Implement a LogMiner event buffer SPI DBZ-3752

    • Remove artifacts.url property from UI config.js DBZ-3209

    • Do not mark offset for commit log files with error DBZ-3366

    • Support read-only MySQL connection in incremental snapshot DBZ-3577

    • CloudEventsConverter does not support Oracle, Db2, or Vitess DBZ-3668

    • Allow usernames to be excluded in logminer query DBZ-3671

    • Track Oracle session PGA memory consumption DBZ-3756

    • Performance issue due to inefficient ObjectMapper initialization DBZ-3770

    • Add more smoke tests DBZ-3789

    Fixes

    • UI frontend build fails for exported checkout which has no .git dir DBZ-3265

    • Broken links in Avro and Outbox Event Router documentation DBZ-3430

    • Cassandra connector generates invalid schema name for its CDC records DBZ-3590

    • Support invisible columns with MySql 8.0.23+ DBZ-3623

    • Db2Connector is unable to establish validation connection DBZ-3632

    • Status stays in RUNNING for Postgres Connector after Postgres is stopped DBZ-3655

    • Change connection validation log level for better visibility DBZ-3677

    • OracleSchemaMigrationIT can throw false positive test failures if test artifacts remain DBZ-3684

    • MySQL Connector error after execute a "create role" statement DBZ-3686

    • ERROR in Entry module not found: Error: Can’t resolve './src' DBZ-3716

    • Error parsing query, even with database.history.skip.unparseable.ddl DBZ-3717

    • Support for TABLE_TYPE missing form MySQL grammar DBZ-3718

    • Oracle LogMiner DdlParser Error DBZ-3723

    • Debezium mysql connector plugin throws SQL syntax error during incremental snapshot DBZ-3725

    • DDL statement couldn’t be parsed DBZ-3755

    • Debezium Oracle connector stops with DDL parsing error DBZ-3759

    • Exception thrown from getTableColumnsFromDatabase DBZ-3769

    • Incorrect regex parsing in start script of kafka image DBZ-3791

    • Dropdown items list visibility blocked by wizard footer DBZ-3794

    • Permission issues with DB2 example image DBZ-3795

    Other changes

    • Make consumer of outbox example more resilient DBZ-1709

    • Set up CI for debezium-examples repo DBZ-1749

    • Refactor LogMinerHelper and SqlUtils DBZ-2552

    • Implement tests for UI components DBZ-3050

    • Add documentation about new capturing implementation for the MySQL connector to downstream product DBZ-3140

    • Remove JSimpleParser DBZ-3155

    • Ability to build KC image with Apicurio converters DBZ-3433

    • Remove log.mining.history.xxx deprecated options DBZ-3581

    • Un-document deprecated options and metrics DBZ-3681

    • Capture changes made by connector user & document that SYS/SYSTEM changes are not captured DBZ-3683

    • Use Debezium thread factory for PG keep-alive DBZ-3685

    • Time for another community newsletter DBZ-3695

    • Improve signalling documentation DBZ-3699

    • Example end-to-end fails due to an API incompatibility with Maven 3.6+ DBZ-3705

    • Example debezium-server-name-mapper fails due to an API incompatibility with Maven 3.6+ DBZ-3706

    • Doc clarification on connector rewrite DBZ-3711

    • Support RHEL deployments in system-test tooling DBZ-3724

    • Misc. tutorial updates DBZ-3747

    • Update Oracle connector deployment instructions for consistency DBZ-3772

    \ No newline at end of file + Release Notes for Debezium 1.7

    Release Notes for Debezium 1.7

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.7.2.Final (December 14th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.1 and has been tested with version 2.8.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.2.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.2.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    The Oracle connector now inserts a placeholder value for unavailable CLOB/BLOB columns (DBZ-4276).

    New Features

    • Support passing an unavailable placeholder value for CLOB/BLOB column types DBZ-4276

    Fixes

    • None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) DBZ-3635

    • Postgres testsuite hangs on PostgresConnectorIT#exportedSnapshotShouldNotSkipRecordOfParallelTx DBZ-4081

    • Container images for Apache Kafka and ZooKeeper fail to start up DBZ-4160

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • Debezium Metrics not being set correctly DBZ-4222

    • DDL statement couldn’t be parsed DBZ-4224

    • Exception ORA-00310 is not gracefully handled during streaming DBZ-4230

    • Reduce verbosity of logging Oracle memory metrics DBZ-4255

    • Signal based incremental snapshot is failing when launched right after a schema change DBZ-4272

    • [Debezium Server] Event Hubs exporter slow/Event data was too large DBZ-4277

    • ParsingException: DDL statement couldn’t be parsed DBZ-4280

    • some data type is not working for sending signals to a Debezium connector DBZ-4298

    • NullPointerException may be thrown when validating table and column lengths DBZ-4308

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    • support for JSON function in MySQL index DBZ-4320

    • Avoid holding table metadata lock in read-only incremental snapshots DBZ-4331

    • Incremental Snapshot does not pick up table DBZ-4343

    • DDL couldn’t be parsed: 'analyze table schema.table estimate statistics sample 5 percent;' DBZ-4396

    • Xstream support with LOB unavailable value placeholder support is inconsistent DBZ-4422

    • Oracle Infinispan buffer fails to serialize unavailable value placeholders DBZ-4425

    Other changes

    • Upgrade to Apicurio Registry 2.0 (QE, docs) DBZ-3629

    • Misc. documentation changes for the Debezium MySQL connector DBZ-3974

    • Promote Outbox SMT to GA DBZ-4012

    • Document incremental chunk size setting DBZ-4127

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    • OracleClobDataTypeIT shouldNotStreamAnyChangesWhenLobEraseIsDetected may fail randomly DBZ-4384

    • Optionally assemble Oracle connector distribution without Infinispan DBZ-4446

    • Drop JMS Appender class during container build DBZ-4447

    Release 1.7.1.Final (November 5th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.1 and has been tested with version 2.8.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.1.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.1.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Store buffered events in separate Infinispan cache DBZ-4159

    • Log warning when table/column name exceeds maximum allowed by LogMiner DBZ-4161

    • Process transaction started/committed in MySQL read-only incremental snapshot DBZ-4197

    Fixes

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • Cassandra UUID handling DBZ-3885

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database DBZ-4064

    • Debezium Server might contain driver versions pulled from Quarkus DBZ-4070

    • Database history is constantly being reconfigured DBZ-4106

    • Oracle flush table should not contain multiple rows DBZ-4118

    • SQL Server Connector fails to wrap in flat brackets DBZ-4125

    • Oracle Connector DDL Parsing Exception DBZ-4126

    • Debezium deals with Oracle DDL appeared IndexOutOfBoundsException: Index: 0, Size: 0 DBZ-4135

    • Error: PostgresDefaultValueConverter - Cannot parse column default value 'NULL::numeric' to type 'numeric'. Expression evaluation is not supported. DBZ-4137

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    • Producer failure NullPointerException DBZ-4166

    • DDL statement couldn’t be parsed - Modify Column DBZ-4174

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • Certain LogMiner-specific tests are not being skipped while using Xstreams DBZ-4188

    • DML statement couldn’t be parsed DBZ-4194

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • Support MySQL Dual Passwords in DDL Parser DBZ-4215

    • CREATE PROCEDURE DDL throws ParsingException DBZ-4229

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    Other changes

    • Incorrect documentation for message.key.columns DBZ-3437

    • Document awareness of Oracle database tuning DBZ-3880

    • Please fix vulnerabilites DBZ-3926

    • Handle SCN gap DBZ-4036

    • Upgrade to Apache Kafka 2.8.1 DBZ-4108

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Misc. MongoDB connector docs fixes DBZ-4149

    • Document Oracle buffering solutions DBZ-4157

    Release 1.7.0.Final (September 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Final from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Final connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    For the incubating Debezium Cassandra connector, the field format emitted for UUID columns has changed: instead of a base64 representation like "AA4AAFcZEqMAAAAAKDJ9Sg==", the more common UUID string representation is now used, such as "000e0000-5719-12a3-0000-000028327d4a" (DBZ-3885).

    New Features

    • DBZ-UI - Provide list of configurations DBZ-3960

    • add ProtobufConverter for Cassandra CDC DBZ-3906

    Fixes

    • java.lang.RuntimeException: com.microsoft.sqlserver.jdbc.SQLServerException: The connection is closed DBZ-3346

    • Oracle connector unable to start in archive only mode DBZ-3712

    • DDL statement couldn’t be parsed DBZ-4026

    • Question about handling Raw column types DBZ-4037

    • Fixing wrong log dir location in Kafka container image DBZ-4048

    • Incremental snapshotting of a table can be prematurely terminated after restart DBZ-4057

    • Documentation - Setting up Db2 - Step 10 (Start the ASN agent) is not accurate DBZ-4044

    • Debezium Server uses MySQL driver version as defined in Quarkus not in Debezium DBZ-4049

    • Events are missed with Oracle connector due to LGWR buffer not being flushed to redo logs DBZ-4067

    • Postgres JDBC Driver version causes connection issues on some cloud Postgres instances DBZ-4060

    • nulls for some MySQL properties in the connector-types backend response DBZ-3108

    Other changes

    • Oracle IncrementalSnapshotIT invalid table test fails DBZ-4040

    • Document how to enable schema for JSON messages DBZ-4041

    • Trigger contributor check action only when PR is opened DBZ-4058

    • Provide JMH benchmark for ChangeEventQueue DBZ-4050

    • Commit message action fails for multi-line commit messages DBZ-4047

    Release 1.7.0.CR2 (September 23rd, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.CR2 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.CR2 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Support read-only MySQL connection in incremental snapshot DBZ-3577

    Fixes

    • Connection failure after snapshot wasn’t executed for a while DBZ-3951

    • Oracle-Connector fails parsing a DDL statement DBZ-3977

    • Oracle connector fails after error ORA-01327 DBZ-4010

    • Incorrect incremental snapshot DDL triggers snapshot that generates unending* inserts against signalling table DBZ-4013

    • Oracle-Connector fails parsing a DDL statement (truncate partition) DBZ-4017

    Other changes

    • Jenkins build node is based on RHEL 8.0 and requires upgrade DBZ-3690

    • Remove GRANT ALTER ANY TABLE from Oracle documentation DBZ-4007

    • Update deploy action configuration for v3 DBZ-4009

    • Website preview via surge.sh DBZ-4011

    • Automate contributor check in COPYRIGHT.txt DBZ-4023

    • Provide an example of usage of snapshot.select.statement.overrides DBZ-3603

    • Throughput Bottleneck and Inefficient Batching in ChangeEventQueue DBZ-3887

    • Performance Bottleneck in TableIdParser String Replacement DBZ-4015

    Release 1.7.0.CR1 (September 16th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.CR1 from any earlier version, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.CR1 connectors will continue where the previous connector left off. As one might expect, change events previously written to Kafka by the old connector are not modified.

    If you are using our container images, do not forget to pull them fresh from the registry.

    Breaking changes

    The container images for Apache ZooKeeper, Kafka and Connect are now based on a Fedora base image (DBZ-3939). This change was introduced to stay synchronized with the latest Java releases.

    A side effect of the Java upgrade is that the old, insecure TLS protocol versions TLSv1 and TLSv1.1 are disabled by default in the 1.7 container images. If your database cannot accept modern TLS connections, Debezium will throw an SSLHandshakeException and fail to connect.

    If a connection with older algorithms is necessary, then remove the entries "TLSv1" and/or "TLSv1.1" from the jdk.tls.disabledAlgorithms key in the following files of the Debezium container image for Kafka Connect:

    • /etc/crypto-policies/back-ends/java.config

    • /lib/jvm/jre/conf/security/java.security

    New Features

    There are no new features in this release.

    Fixes

    • RedisStreamChangeConsumer - handleBatch - client.xadd should be wrapped with a try catch block DBZ-3713

    • Incorrect information in documentation about supplemental logging DBZ-3776

    • DML statement couldn’t be parsed DBZ-3892

    • DEBEZIUM producer stops unexpectedly trying to change column in table which does not exist DBZ-3898

    • "binary.handling.mode": "hex" setting works incorrectly for values with trailing zeros DBZ-3912

    • System test-suite is unable to work with unreleased Apicurio versions DBZ-3924

    • CI support for running Apicurio registry tests DBZ-3932

    • Incorrect validation of truncate handling mode DBZ-3935

    • protobuf decoder has sends unsigned long as signed for Postgres 13 DBZ-3937

    • Field#description() should return a proper java.lang.String when documentation/description is not set DBZ-3943

    • MySQL example image not working after upgrade to 8.0 DBZ-3944

    • Fix empty high watermark check DBZ-3947

    • Oracle Connector replicating data from all PDBs. Missing PDB filter during replication. DBZ-3954

    • Oracle connector Parsing Exception: DDL statement couldn’t be parsed DBZ-3962

    • FormSwitchComponent not working correctly in case of duplicate STM form DBZ-3968

    • Strings with binary collation shouldn’t be parsed as Types.BINARY by MySqlAntlrDdlParser. DBZ-3969

    • Openshift pods list image preview not found DBZ-3970

    • MySqlValueConvertes.java has typo DBZ-3976

    • Mysql-Connector fails parsing invalid decimal format DDL statement DBZ-3984

    • Connection Factory is not used when validating SQL Server Connector DBZ-4001

    Other changes

    • Promote Outbox SMT to GA DBZ-3584

    • Clarify lifecycle of snapshot metrics DBZ-3613

    • Explore on building non-core repos with corresponding PR branch of core repo and vice-versa DBZ-3748

    • Upgrade to binlog-client 0.25.3 DBZ-3787

    • RelationalSnapshotChangeEventSource should accept a RelationalDatabaseSchema DBZ-3818

    • Create GH Action that flags "octocat" commits DBZ-3822

    • Publish Maven repo with downstream artifacts DBZ-3861

    • CI preparation for Apicurio Registry downstream DBZ-3908

    • Specify branch name on push/pull_request step in all GH action workflows DBZ-3913

    • Consistently order releases from new to old on the website DBZ-3917

    • Update RELEASING.md DBZ-3918

    • Update antora.yml file with new values for SMT attributes DBZ-3922

    • Documentation update should not trigger staging workflow build DBZ-3923

    • Upgrade to Jackson Databind version 2.10.5.1 DBZ-3927

    • Add top-level Transformation menu node for downstream docs DBZ-3931

    • Docker image serving plugin artifacts over HTTP for new Strimzi deployment mechanism DBZ-3934

    • Upgrade MySQL example image to 8.0 DBZ-3936

    • Gracefully handle DB history file stored in a sym-linked directory DBZ-3958

    • Update docs to specify that connectors track metadata only for transactions that occur after deployment DBZ-3961

    • Update and automate Jenkis Node setup DBZ-3965

    • Hyper-link references between options in the Outbox SMT options table DBZ-3920

    • Generify exclusion of columns from snapshotting DBZ-2525

    • PoC for adding transformations / SMT steps to the Debezium UI DBZ-3698

    • Use No match found of pf Empty state component in filter page. DBZ-3888

    • Update the "Skip to review" implementation as per PF new documented standard design pattern DBZ-3916

    • Set up MongoDB 5.0 image DBZ-3973

    Release 1.7.0.Beta1 (August 25th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, then please do not forget to pull them fresh from the Docker registry.
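As a concrete illustration of this procedure, the following is a minimal sketch against the Kafka Connect REST API; the Connect URL and the connector name "inventory-connector" are placeholders, not values tied to this release.

    # Minimal sketch of the upgrade flow via the Kafka Connect REST API.
    # Assumptions: Connect listens on localhost:8083 and the connector is
    # registered as "inventory-connector"; adjust both for your deployment.
    import requests

    CONNECT = "http://localhost:8083"
    NAME = "inventory-connector"

    # 1. Save the current configuration so it can be re-applied unchanged.
    config = requests.get(f"{CONNECT}/connectors/{NAME}/config").json()

    # 2. Gracefully stop the connector by deleting it; its offsets remain in
    #    the Connect offsets topic, so it can resume where it left off.
    requests.delete(f"{CONNECT}/connectors/{NAME}")

    # 3. Outside this script: remove the old plugin files, unpack the
    #    1.7.0.Beta1 plugin archive into the plugin path (or pull the fresh
    #    container image), and restart the Connect worker.

    # 4. Re-register the connector with the same configuration.
    requests.put(f"{CONNECT}/connectors/{NAME}/config", json=config)

    # 5. Verify that the connector and its tasks are RUNNING again.
    print(requests.get(f"{CONNECT}/connectors/{NAME}/status").json())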

    Breaking changes

The MySQL driver was updated to the latest version 8.0.26 (DBZ-3833). This update comes with new timezone handling and new configuration options.

    Detailed information can be found in the MySQL docs.
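If the new timezone behaviour affects your deployment, the relevant driver option can usually be supplied through the connector configuration. Below is a hypothetical sketch only; the pass-through property database.connectionTimeZone is an assumption for illustration (connectionTimeZone is the Connector/J replacement for the older serverTimezone option), so verify it against the MySQL connector and driver documentation.

    # Hypothetical sketch only. It assumes that "database."-prefixed
    # properties are passed through to the JDBC driver and that
    # "connectionTimeZone" (the Connector/J 8.0.23+ name for the former
    # "serverTimezone" option) is the knob you need; verify both.
    mysql_connector_config = {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "database.hostname": "mysql",          # placeholder
        "database.port": "3306",
        "database.user": "debezium",           # placeholder
        "database.password": "dbz",            # placeholder
        "database.server.name": "dbserver1",   # placeholder
        "database.connectionTimeZone": "UTC",  # driver option (assumption)
    }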

    New Features

    • Sink adapter for Nats Streaming DBZ-3815

    • Debezium Server’s run.sh startup script fails on msys or cygwin bash DBZ-3840

    • Upgrade Debezium Server Pravega sink to 0.9.1 DBZ-3841

    Fixes

    • Create example for using self-managed Debezium with MK DBZ-2947

    • Exception when validating field.exclude.list for Mongo DB connectors DBZ-3028

• If /api/connectors/1 takes longer than the polling interval to fail, the spinner keeps loading DBZ-3313

    • SQL Server CDC event timestamps do not get converted to UTC DBZ-3479

    • Debezium snapshot.select.statement.overrides overrides not used DBZ-3760

    • Server name pattern is unnecessarily restrictive. DBZ-3765

    • Crash when processing MySQL 5.7.28 TIME fields returns empty blob instead of null DBZ-3773

    • Debezium UI and CDC DBZ-3781

    • Disable "Next" if any field value is changed after the validation. DBZ-3783

    • Add DEFAULT to partition option engine DBZ-3784

    • Initiating MongoDB connector causes oplog table scan DBZ-3788

    • SRCFG00014: The config property debezium.sink.pravega.scope is required but it could not be found in any config source DBZ-3792

    • LSN component of Postgres sequence numbers is not updated DBZ-3801

    • Debezium 1.6.1 expecting database.port even when database.url is provided in config. DBZ-3813

    • Postgres numeric default value throwing exception DBZ-3816

    • SQL Server connector doesn’t handle retriable errors during task start DBZ-3823

    • Debezium OpenShift integration test-suite failure DBZ-3824

    • Debezium Server Kinesis Sink Cannot Handle Null Events DBZ-3827

    • Timeout when reading from MongoDB oplog cannot be controlled DBZ-3836

    • Snapshot locking mode "minimal_percona" incorrectly resets transaction & isolation state DBZ-3838

    • Properly skip tests when minor/patch are not specified DBZ-3839

    • Truncate validation should verify key schema is null and not value schema DBZ-3842

    • System test-suite fails if CRD already exist within the cluster DBZ-3846

    • Incorrect test-tags for OcpAvroDB2ConnectorIT DBZ-3851

    • System test-suite CI job does not have RHEL image parameter DBZ-3852

    • Typo with prodname asciidoc attribute usage DBZ-3856

    • SQL Server Connector finds tables for streaming but not snapshot DBZ-3857

    • Signaling table id column too small in example DBZ-3867

    • Oracle unparsable DDL issue DBZ-3877

    • Support AS clause in GRANT statement DBZ-3878

    • Error Parsing Oracle DDL dropping PK DBZ-3886

    • Q3 docs referencing Service Registry 2.0 docs DBZ-3891

    • EMPTY_CLOB() and EMPTY_BLOB() should be treated as empty LOB values DBZ-3893

    • Oracle DDL parsing issue DBZ-3896

    Other changes

    • Debezium UI participating in upstream releases — follow-up DBZ-3169

    • Discuss SMT predicates in docs DBZ-3227

    • Test failure for SqlServerConnectorIT#excludeColumnWhenCaptureInstanceExcludesColumns DBZ-3228

    • Adjust to changed Strimzi CRDs DBZ-3385

    • Create a smoke test for Debezium with Kafka on RHEL DBZ-3387

    • Promote Debezium support on RHEL to GA DBZ-3406

    • Oracle Docs for TP DBZ-3407

    • Upgrade to Kafka 2.8 DBZ-3444

    • Update Debezium on RHEL documentation for GA DBZ-3462

    • Options in outbox router docs not linked DBZ-3649

    • Create Kafka related images based on UBI-8 for RHEL certification DBZ-3650

    • Error in description of the property column.mask.hash.hashAlgorithm.with.salt.salt DBZ-3802

    • Debezium does not provide up-to-date container images DBZ-3809

• Change DBZ Kafka image, so its start script can be used on QA RHEL Kafka DBZ-3810

    • Test with Apicurio Registry 2.0 in system level test-suite DBZ-3812

    • Upgrade commons-compress from 1.20 to 1.21 DBZ-3819

    • Update jenkins job configuration to incorporate recent system-testsuite changes DBZ-3825

    • Test Failure - RecordsStreamProducerIT#testEmptyChangesProducesHeartbeat DBZ-3828

    • Upgrade UI proxy connectors to 1.6.1.Final DBZ-3837

    • Improperly constructed links generating downstream build errors DBZ-3858

    • CI Failure in VitessConnectorIT.shouldOutputRecordsInCloudEventsFormat DBZ-3863

    • CI Failure for StreamingSourceIT.shouldFailOnSchemaInconsistency DBZ-3869

    • Extract new top-level menu node for SMTs DBZ-3873

    • Introduce documentation variables for AMQ DBZ-3879

    • Don’t log error when dropping non-existent replication slot in tests DBZ-3889

    • Intermittent test failures on CI: VitessConnectorIT::shouldUseUniqueKeyAsRecordKey DBZ-3900

    • Intermittent test failures on CI: IncrementalSnapshotIT#updatesWithRestart DBZ-3901

    • Test shouldNotEmitDdlEventsForNonTableObjects randomly fails DBZ-3902

    • VOLUME instruction causes issue with recent Docker versions DBZ-3903

    • Provide ability to denote UI order in field metadata DBZ-3904

    • Make relocation.dir and offset.dir configs required. DBZ-2251

    • Create Debezium API Spec Generator and static API definitions for connectors DBZ-3364

    • Improve incremental snapshot metrics DBZ-3688

• Import PatternFly CSS from @patternfly/patternfly DBZ-3779

    • Allow system testsuite to produce Strimzi image for arbitrary released version of Debezium DBZ-3826

    • PostgreSQL - Minor Performance bottleneck in PostgresChangeRecordEmitter DBZ-3870

    • Oracle - Provide a more user-friendly way to update SCN DBZ-3876

    • Test failure on CI - SqlServerConnectorIT#readOnlyApplicationIntent DBZ-2398

    • Test failure for SqlServerConnectorIT#EventProcessingFailureHandlingIT DBZ-3229

    • Remove underscore from Debezium Server NATS sink Java package name DBZ-3910

    • LogMinerDatabaseStateWriter causes a SQLException DBZ-3911

    • Maven release fails due to debezium-testing version handling DBZ-3909

    • Zookeeper image should not use archive.apache.org DBZ-3914

    Release 1.7.0.Alpha1 (July 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.7.0 and has been tested with version 2.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.7.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.7.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.7.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, then please do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Implement incremental snapshotting for Oracle DBZ-3692

    • Implement a LogMiner event buffer SPI DBZ-3752

    • Remove artifacts.url property from UI config.js DBZ-3209

    • Do not mark offset for commit log files with error DBZ-3366

    • Support read-only MySQL connection in incremental snapshot DBZ-3577

    • CloudEventsConverter does not support Oracle, Db2, or Vitess DBZ-3668

    • Allow usernames to be excluded in logminer query DBZ-3671

    • Track Oracle session PGA memory consumption DBZ-3756

    • Performance issue due to inefficient ObjectMapper initialization DBZ-3770

    • Add more smoke tests DBZ-3789

    Fixes

    • UI frontend build fails for exported checkout which has no .git dir DBZ-3265

    • Broken links in Avro and Outbox Event Router documentation DBZ-3430

    • Cassandra connector generates invalid schema name for its CDC records DBZ-3590

    • Support invisible columns with MySql 8.0.23+ DBZ-3623

    • Db2Connector is unable to establish validation connection DBZ-3632

    • Status stays in RUNNING for Postgres Connector after Postgres is stopped DBZ-3655

    • Change connection validation log level for better visibility DBZ-3677

    • OracleSchemaMigrationIT can throw false positive test failures if test artifacts remain DBZ-3684

    • MySQL Connector error after execute a "create role" statement DBZ-3686

    • ERROR in Entry module not found: Error: Can’t resolve './src' DBZ-3716

    • Error parsing query, even with database.history.skip.unparseable.ddl DBZ-3717

• Support for TABLE_TYPE missing from MySQL grammar DBZ-3718

    • Oracle LogMiner DdlParser Error DBZ-3723

    • Debezium mysql connector plugin throws SQL syntax error during incremental snapshot DBZ-3725

    • DDL statement couldn’t be parsed DBZ-3755

    • Debezium Oracle connector stops with DDL parsing error DBZ-3759

    • Exception thrown from getTableColumnsFromDatabase DBZ-3769

    • Incorrect regex parsing in start script of kafka image DBZ-3791

    • Dropdown items list visibility blocked by wizard footer DBZ-3794

    • Permission issues with DB2 example image DBZ-3795

    Other changes

    • Make consumer of outbox example more resilient DBZ-1709

    • Set up CI for debezium-examples repo DBZ-1749

    • Refactor LogMinerHelper and SqlUtils DBZ-2552

    • Implement tests for UI components DBZ-3050

    • Add documentation about new capturing implementation for the MySQL connector to downstream product DBZ-3140

    • Remove JSimpleParser DBZ-3155

    • Ability to build KC image with Apicurio converters DBZ-3433

    • Remove log.mining.history.xxx deprecated options DBZ-3581

    • Un-document deprecated options and metrics DBZ-3681

    • Capture changes made by connector user & document that SYS/SYSTEM changes are not captured DBZ-3683

    • Use Debezium thread factory for PG keep-alive DBZ-3685

    • Time for another community newsletter DBZ-3695

    • Improve signalling documentation DBZ-3699

    • Example end-to-end fails due to an API incompatibility with Maven 3.6+ DBZ-3705

    • Example debezium-server-name-mapper fails due to an API incompatibility with Maven 3.6+ DBZ-3706

    • Doc clarification on connector rewrite DBZ-3711

    • Support RHEL deployments in system-test tooling DBZ-3724

    • Misc. tutorial updates DBZ-3747

    • Update Oracle connector deployment instructions for consistency DBZ-3772

Debezium Release Series 1.8

    stable

    Tested Versions

    Java 11+
    Kafka Connect 1.x, 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.27
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2, 4.4, 5.0
    Driver: 4.3.3
    PostgreSQL Database: 9.6, 10, 11, 12, 13, 14
    Plug-ins: decoderbufs, wal2json, pgoutput
    Driver: 42.2.24
    Oracle Database: 12c, 19c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0
    SQL Server Database: 2017, 2019
    Driver: 7.2.2.jre8
    Cassandra Database: 3.11.4
    Driver: 3.11.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    1.8.0.Final

    2021-12-16

    1.8.0.CR1

    2021-12-09
    Incremental snapshot for MongoDB connector; Last steps for change streams based MongoDB mode; Truncate table and binary handling mode for Oracle connector; Debezium Kafka Connect REST Extension

    1.8.0.Beta1

    2021-11-28
    Support for pg_logical_emit_message in PostgreSQL connector; Enhanced interval type support in Oracle connector; Infinispan cache config is more extensible for Oracle connector

    1.8.0.Alpha2

    2021-11-11
    Multiple Infinispan caches for Oracle connector; Default value stored in JSON-based schema history; Heartbeat action queries for MySQL; Configurable transaction topic name

    Release Notes for Debezium 1.8

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.8.1.Final (February 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There were changes to the format of the Infinispan cache to support the storage of certain large object (LOB) events. If you use the Infinispan buffer cache implementation and enabled LOB support, the cache files must be deleted and a new snapshot taken due to these compatibility changes (DBZ-4366).

The Debezium container images for Apache Kafka and Kafka Connect contain the log4j 1.x library, which is a runtime dependency of Kafka (it is not used in any way by Debezium). Several vulnerabilities were recently reported against some classes contained in that library. While these classes are used by neither Kafka (Connect) nor Debezium, the class file org/apache/log4j/net/JMSSink.class and the packages org/apache/log4j/jdbc/ and org/apache/log4j/chainsaw/ have been removed from the log4j 1.x JAR shipped with these container images as a precaution. If you actually need these classes, obtain the original log4j 1.x JAR and add it via custom images derived from the Debezium ones; given the aforementioned vulnerabilities, we advise against doing so (DBZ-4568).

    New features

    • Mining session stopped due to 'No more data to read from socket' DBZ-4536

    • Unsupported column type 'ROWID' error DBZ-4595

    Fixes

    • MysqlSourceConnector issue with latin1 tables DBZ-3700

    • Oracle Logminer: snapshot→stream switch misses DB changes in ongoing transactions DBZ-4367

    • DDL statement couldn’t be parsed DBZ-4485

    • Extra file checker-qual in PostgreSQL package DBZ-4507

    • Add backend errors among retriable for Postgres connector DBZ-4520

    • LogMinerHelperIT fails when running Oracle CI with a fresh database DBZ-4542

    • Oracle-Connector fails parsing a DDL statement (VIRTUAL keyword) DBZ-4546

    • io.debezium.text.ParsingException when column name is 'seq' DBZ-4553

    • MySQL FLUSH TABLE[S] with empty table list not handled DBZ-4561

    • Oracle built-in schema exclusions should also apply to DDL changes DBZ-4567

    • mongo-source-connector config database.include.list does not work DBZ-4575

    • Incremental snapshots does not honor column case sensitivity DBZ-4584

    • Oracle connector can’t find the SCN DBZ-4597

• Caused by: Multiple parsing errors / io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira DBZ-4609

    • Parse including keyword column table ddl error DBZ-4640

    • SQL Server ad-hoc snapshot - SnapshotType is case sensitive DBZ-4648

    • DDL parsing issue: ALTER TABLE …​ MODIFY PARTITION …​ DBZ-4649

    • Mark incompatible Xstream tests as LogMiner only DBZ-4650

    • DDL statement couldn’t be parsed mismatched input '`encrypted` DBZ-4661

    • DDL parsing exception DBZ-4675

    Other changes

    • Create downstream documentation for incremental snapshotting DBZ-3457

    • Update downstream docs in regards to deprecated elements DBZ-3881

    • Update connector documentation to reflect new deployment mechanism DBZ-3991

    • Broken links to the Transaction metadata topics from descriptions for provide.transaction.metadata property DBZ-3997

    • Point to supported versions in connector pages DBZ-4300

    • REST extension tests must not depend on source code version DBZ-4466

    • snapshotPreceededBySchemaChange should not be tested for Db2 DBZ-4467

    • PostgresShutdownIT must not depend on Postgres version DBZ-4469

    • Remove INTERNAL_KEY_CONVERTER and INTERNAL_VALUE_CONVERTER env vars DBZ-4514

    • Bump protobuf version to the latest 3.x DBZ-4527

    • Fix links to connector incremental snapshots topic DBZ-4552

    • Doc updates to address downstream build issues DBZ-4563

    • Older degree of parallelism DDL syntax causes parsing exception DBZ-4571

    • Conditionalize note about outbox event router incompatibility DBZ-4573

    • Update description of snapshot.mode in postgresql.adoc DBZ-4574

    • Update shared UG deployment file to clarify that connectors can use existing KC instance DBZ-4582

    • Server transformation properties should refer to "type" rather than "class" DBZ-4613

    • Oracle DDL parser failure with supplemental log group clause with a custom name DBZ-4654

    • Upgrade postgres driver to version 42.3.2 DBZ-4658

    • Make sure right protoc version is applied DBZ-4668

    • MongoUtilIT test failure - unable to connect to primary DBZ-4676

    • Update shared UG deployment file for use with downstream OCP Install Guide DBZ-4700

    • Indicate ROWID is not supported by XStream DBZ-4702

    Release 1.8.0.Final (December 16th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

The incubating SMT for extracting the new document state from MongoDB change events (see https://debezium.io/documentation/reference/stable/transformations/mongodb-event-flattening.html) now propagates the document’s id within a field named _id in the change event value (DBZ-4413). This avoids collisions with other existing document fields under the previously used name id.
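For orientation, a hedged sketch of an SMT configuration affected by this change; the transform alias is a placeholder, and the class name should be checked against the linked documentation.

    # Sketch: MongoDB new-document-state extraction SMT. The alias "unwrap"
    # is a placeholder. With 1.8.0.Final the flattened value carries the
    # document id in a field named "_id" (previously "id"), so consumers
    # that read the old "id" field need to be adjusted.
    smt_config = {
        "transforms": "unwrap",
        "transforms.unwrap.type":
            "io.debezium.connector.mongodb.transforms.ExtractNewDocumentState",
    }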

    New Features

    • Allow to configure custom Hibernate user types for Quarkus outbox extension DBZ-3552

    • Create a Debezium schema generator for Debezium connectors (initial work) DBZ-4393

    Fixes

    • Outbox Event Router not working in Oracle Connector DBZ-3940

    • some data type is not working for sending signals to a Debezium connector DBZ-4298

    • Debezium UI - Connector create fails if topic group defaults not specified DBZ-4378

    Other changes

    • Intermittent test failure: SqlServerChangeTableSetIT#readHistoryAfterRestart() DBZ-3306

    • Upgrade to Apicurio Registry 2.0 (QE, docs) DBZ-3629

    • Oracle upstream tests in internal CI DBZ-4185

    • Document MongoDB source format DBZ-4420

    • Missing log message for snapshot.locking.mode = none DBZ-4426

    • Caching not working in formatting job DBZ-4429

    • Optionally assemble Oracle connector distribution without Infinispan DBZ-4446

    • Simplify the implementation of method duration in debezium/util/Strings.java DBZ-4423

    • Exclude log4j from Debezium Server distribution in 1.8 DBZ-4452

    Release 1.8.0.CR1 (December 9th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Debezium connector for Vitess exposes a new attribute, "keyspace", in the "source" block of emitted change events (DBZ-4412).

    Vitess connector: The mapping for UINT64 columns has been changed from INT64 to String, so as to avoid a value overflow for values larger than 2^63 (DBZ-4403).

    The source block structure of change events emitted by the incubating Debezium connector for Vitess has been adjusted (DBZ-4428):

    • The obsolete attribute "schema" has been removed

    • The attribute "db" is now always empty; the keyspace from which a change event originated, can be retrieved via the new "keyspace" attribute

    New Features

    • Implement incremental snapshotting for MongoDB DBZ-3342

    • Add schema descriptors for the UI JSON Schema for SQL Server Connector DBZ-3697

    • Optionally add OPTION(RECOMPILE) to incremental snapshot queries DBZ-4249

    • Log count of changed records sent DBZ-4341

    • Add support for truncate in oracle connector DBZ-4385

    • Support binary_handling_mode for Oracle connector DBZ-4400

    • Enforce consistent vgtid representation in vitess connector DBZ-4409

    Fixes

    • Parallel write can be lost during catch-up phase DBZ-2792

    • None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) DBZ-3635

    • [Debezium Server] Event Hubs exporter slow/Event data was too large DBZ-4277

    • NullPointer exception on Final stage of snapshot for Oracle connector DBZ-4376

    • Oracle pipeline matrix docker conflict DBZ-4377

    • System testsuite unable to pull apicurio operator from quay DBZ-4382

    • Oracle DDL Parser Error DBZ-4388

    • DDL couldn’t be parsed: 'analyze table schema.table estimate statistics sample 5 percent;' DBZ-4396

    • MySQL: DDL Statement could not be parsed 'GRANT' DBZ-4397

    • Support keyword CHAR SET for defining charset options DBZ-4402

    • Xstream support with LOB unavailable value placeholder support is inconsistent DBZ-4422

    • Oracle Infinispan buffer fails to serialize unavailable value placeholders DBZ-4425

    • VStream gRPC connection closed after being idle for a few minutes DBZ-4389

    Other changes

    • Oracle testing in system-level testsuite DBZ-3963

    • Upgrade to Quarkus 2.5.0.Final DBZ-4035

    • Document incremental chunk size setting DBZ-4127

    • Complete CDC implementation based on MongoDB Change Streams DBZ-4205

    • Record video demo showing Kafka topics creation and transformation UIs DBZ-4260

    • Add Oracle 12.2.0.1 to internal CI Oracle job DBZ-4322

    • OracleClobDataTypeIT shouldNotStreamAnyChangesWhenLobEraseIsDetected may fail randomly DBZ-4384

    • Upgrade impsort-maven-plugin from 1.6.0 to 1.6.2 DBZ-4386

    • Upgrade formatter-maven-plugin from 2.15.0 to 2.16.0 DBZ-4387

    • Unstable test for online DDL changes DBZ-4391

    • Create Debezium Kafka Connect REST Extension DBZ-4028

    Release 1.8.0.Beta1 (November 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

Vitess connector no longer depends on vtctld (DBZ-4324).

Oracle connector inserts a placeholder value for unavailable CLOB/BLOB columns (DBZ-4276).
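As a hedged illustration of the second change, an Oracle connector configured with LOB support might emit a placeholder for CLOB/BLOB columns whose value is not available in a given change; the property name and default shown below are assumptions, so check the Oracle connector documentation for your version.

    # Hypothetical sketch; the property names and the default placeholder
    # value are assumptions for illustration, not confirmed option names.
    oracle_lob_config = {
        "lob.enabled": "true",
        # value written for CLOB/BLOB columns whose content is not present
        # in the captured change event:
        "unavailable.value.placeholder": "__debezium_unavailable_value",
    }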

    New Features

    • Support pg_logical_emit_message DBZ-2363

    • Outbox Event Router for MongoDB DBZ-3528

    • Improve interval type support in Oracle DBZ-1539

    • money data type should be controlled by decimal.handling.mode DBZ-1931

    • Support for Multiple Databases per SQL Server Connector DBZ-2975

    • Debezium server stops with wrong exit code (0) DBZ-3570

    • Change Debezium UI configurations property names DBZ-4066

    • Extend configuration support for Infinispan caches DBZ-4169

    • Support schema changes during incremental snapshot DBZ-4196

    • Handle login failure during instance upgrade as retriable DBZ-4285

    • Modify the type of aggregateid in MongoDB Outbox Event Router DBZ-4318

• Make the MS SQL Materialized view limitation explicit DBZ-4330

    Fixes

    • PostgresConnector does not allow a numeric slot name DBZ-1042

    • False empty schema warning for snapshot mode never DBZ-1344

• Tutorial incorrectly shows "op": "c" for initial change events DBZ-3786

    • SQL Server fails to read CDC events if there is a schema change ahead DBZ-3992

• Once the user clicks the "Review and finish" button, that step’s link is not enabled in the wizard side menu DBZ-4119

    • DDL statement couldn’t be parsed DBZ-4224

    • The lastOffset variable in MySqlStreamingChangeEventSource is always null DBZ-4225

    • Unknown entity: io.debezium.outbox.quarkus.internal.OutboxEvent DBZ-4232

    • Signal based incremental snapshot is failing when launched right after a schema change DBZ-4272

    • SQL Server connector doesn’t handle multiple capture instances for the same table with equal start LSN DBZ-4273

    • Debezium UI - some issues with browser support for replaceAll DBZ-4274

    • AbstractDatabaseHistory.java has typo DBZ-4275

    • OracleConnectorIT - two tests fail when using Xstream DBZ-4279

    • ParsingException: DDL statement couldn’t be parsed DBZ-4280

    • Topic Group UI step does not refresh correctly after setting properties DBZ-4293

    • Add MariaDB specific username for MySQL parser DBZ-4304

    • NullPointerException may be thrown when validating table and column lengths DBZ-4308

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    • support for JSON function in MySQL index DBZ-4320

    • Avoid holding table metadata lock in read-only incremental snapshots DBZ-4331

    • Convert mysql time type default value error DBZ-4334

    • Wrong configuration option name for MongoDB Outbox SMT DBZ-4337

    • Incremental Snapshot does not pick up table DBZ-4343

    • Oracle connector - Cannot parse column default value 'NULL ' to type '2' DBZ-4360

    Other changes

    • Add canonical URL links to older doc versions DBZ-3897

    • Set up testing job for MongoDB 5.0 DBZ-3938

    • Misc. documentation changes for the Debezium MySQL connector DBZ-3974

    • Promote Outbox SMT to GA DBZ-4012

    • Test failure: SchemaHistoryTopicIT::schemaChangeAfterSnapshot() DBZ-4082

    • Jenkins job for creating image snapshot used by new Jenkins nodes DBZ-4122

    • Use SMT/Transformation UI backend endpoint DBZ-4146

    • Create GH Action for tearing down abandoned website preview environments DBZ-4214

    • Unify Memory and Infinispan event processor implementations DBZ-4236

    • Update system-level testsuite CI job DBZ-4267

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    • Only build debezium-core and dependences in cross-repo builds DBZ-4289

    • Reduce log verbosity DBZ-4291

    • Vitess connector should expose vstream flags DBZ-4295

    • Vitess connector should allow client to config starting VGTID DBZ-4297

    • Layout glitch on docs landing page DBZ-4299

    • Provide outbox routing example for MongoDB DBZ-4302

    • Fix wrong option names in examples DBZ-4311

    • Update functional test CI to work with downstream source archive DBZ-4316

    • Provide example showing usage of remote Infinispan cache DBZ-4326

    • Provide CI for MongoDB 4.4 DBZ-4327

    • Test case for schema migration in Vitess connector DBZ-4353

    • Enable transaction metadata for vitess connector DBZ-4355

    • io.debezium.data.VerifyRecord.isValid(SourceRecord) is a no-op DBZ-4364

    • SignalsIT times out after 6h on CI DBZ-4370

    • Document incremental chunk size setting DBZ-4127

    Release 1.8.0.Alpha2 (November 11th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

Previous versions stored buffered events in an Infinispan cache as part of the transaction data, which could lead to memory issues when that data was loaded. This version keeps transaction metadata and buffered events in separate caches, so event data is no longer loaded during transaction metadata processing (DBZ-4159).
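For illustration only, here is a sketch of how an Oracle connector buffer configuration with separate caches could look; every property name below is an assumption used to convey the idea, so consult the Oracle connector documentation for the actual options.

    # Purely illustrative, with assumed property names.
    oracle_buffer_config = {
        "log.mining.buffer.type": "infinispan_embedded",
        # transaction metadata and buffered change events live in separate
        # caches, so reading metadata no longer pulls event data into memory:
        "log.mining.buffer.infinispan.cache.transactions":
            "<local-cache name='transactions'>...</local-cache>",
        "log.mining.buffer.infinispan.cache.events":
            "<local-cache name='events'>...</local-cache>",
    }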

    New Features

    • TableChangesSerializer ignored defaultValue and enumValues DBZ-3966

    • Support for heartbeat action queries for MySQL DBZ-4029

• Expose the transaction topic name as a config DBZ-4077

    • Improvement to the topic creation step DBZ-4172

    • Process transaction started/committed in MySQL read-only incremental snapshot DBZ-4197

    • Ability to use base image from authenticated registry with KC build mechanism DBZ-4227

    • Remove SqlServerConnector database.user Required Validator DBZ-4231

• Specify database host name as 0.0.0.0 for Oracle connector tests CI DBZ-4242

• Support all charsets in MySQL parser DBZ-4261

    Fixes

    • "table" is null for table.include.list and column.include.list DBZ-3611

    • Debezium server crashes when deleting a record from a SQLServer table (redis sink) DBZ-3708

    • Invalid default value error on captured table DDL with default value DBZ-3710

    • Incremental snapshot doesn’t work without primary key DBZ-4107

    • Error: PostgresDefaultValueConverter - Cannot parse column default value 'NULL::numeric' to type 'numeric'. Expression evaluation is not supported. DBZ-4137

    • Container images for Apache Kafka and ZooKeeper fail to start up DBZ-4160

    • Debezium 1.7 image disables unsecure algorithms. Breaks unpatched databases DBZ-4167

    • DDL statement couldn’t be parsed - Modify Column DBZ-4174

    • DML statement couldn’t be parsed DBZ-4194

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • DDL with Oracle SDO_GEOMETRY cannot be parsed DBZ-4206

    • DDL with Oracle sequence as default for primary key fails schema generation DBZ-4208

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • Support MySQL Dual Passwords in DDL Parser DBZ-4215

    • Debezium Metrics not being set correctly DBZ-4222

    • CREATE PROCEDURE DDL throws ParsingException DBZ-4229

    • Exception ORA-00310 is not gracefully handled during streaming DBZ-4230

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    • Oracle connector parses NUMBER(*,0) as NUMBER(0,0) in DDL DBZ-4240

    • Signal based incremental snapshot is failing if database name contains dash DBZ-4244

    • SQL Server connector doesn’t handle retriable errors during database state transitions DBZ-4245

    • Does Debezium support database using charset GB18030? DBZ-4246

    • Broken anchors in Debezium Documentation DBZ-4254

    • Reduce verbosity of logging Oracle memory metrics DBZ-4255

    • When Debezium executes select * in the snapshot phase, it does not catch the sql exception, resulting in confusing exceptions and logs DBZ-4257

    Other changes

    • Rename "master" branches to "main" for remaining repos DBZ-3626

    • Support Oracle Logminer docker image in system level test-suite DBZ-3929

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Use topic auto-creation UI backend endpoint DBZ-4148

    • Remove superfluous build triggers DBZ-4200

    • Tag debezium/tooling:1.2 version DBZ-4238

    • Rework MySqlTimestampColumnIT test DBZ-4241

    • Remove unused code DBZ-4252

    • Optimize tooling image DBZ-4258

    • Change DB2 image in testsuite to use private registry DBZ-4268

    Release 1.8.0.Alpha1 (October 27th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

If you are using our Docker images, then please do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Provide MongoDB CDC implementation based on 4.0 change streams DBZ-435

    • No option fullDocument for the connection to MongoDB oplog.rs DBZ-1847

    • Make antora playbook_author.yml use current branch DBZ-2546

    • Support Kerberos for Debezium MS SQL plugin DBZ-3517

    • Make "snapshot.include.collection.list" case insensitive like "table.include.list" DBZ-3895

    • Exclude usernames at transaction level DBZ-3978

    • [oracle] Add the SCHEMA_ONLY_RECOVERY snapshot mode DBZ-3986

    • Support parse table and columns comment DBZ-4000

    • Upgrade postgres JDBC driver to version 42.2.24 DBZ-4046

    • Support JSON logging formatting DBZ-4114

    • Upgrade mysql-binlog-connector-java to v0.25.4 DBZ-4152

    • Wrong class name in SMT predicates documentation DBZ-4153

    • Log warning when table/column name exceeds maximum allowed by LogMiner DBZ-4161

    • Add Redis to debezium-server-architecture.png DBZ-4190

• wrong variable naming in a unit test for Outbox Event Router SMT DBZ-4191

    • MongoDB connector support user defined topic delimiter DBZ-4192

    • Parse the "window" keyword for agg and nonagg function in mysql8 DBZ-4193

    • wrong field on change event message example in MongoDB Connector documentation DBZ-4201

    • Add a backend service for UI to fetch the SMT and topic auto-creation configuration properties DBZ-3874

    Fixes

    • Debezium build is unstable for Oracle connector DBZ-3807

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • Crash processing MariaDB DATETIME fields returns empty blob instead of null (Snapshotting with useCursorFetch option) DBZ-4032

• column.mask.hash.hashAlgorithm.with…: data corruption occurs when using this feature DBZ-4033

    • Compilation of MySQL grammar displays warnings DBZ-4034

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database DBZ-4064

    • Extra double quotes on Kafka message produced by Quarkus Outbox Extension DBZ-4068

    • Debezium Server might contain driver versions pulled from Quarkus DBZ-4070

    • Connection failure while reading chunk during incremental snapshot DBZ-4078

    • Postgres 12/13 images are not buildable DBZ-4080

    • Postgres testsuite hangs on PostgresConnectorIT#exportedSnapshotShouldNotSkipRecordOfParallelTx DBZ-4081

    • CloudEventsConverter omits payload data of deleted documents DBZ-4083

    • Database history is constantly being reconfigured DBZ-4106

    • projectId not being set when injecting a custom PublisherBuilder DBZ-4111

    • Oracle flush table should not contain multiple rows DBZ-4118

    • Can’t parse DDL for View DBZ-4121

    • SQL Server Connector fails to wrap in flat brackets DBZ-4125

    • Oracle Connector DDL Parsing Exception DBZ-4126

• Processing Oracle DDL raises IndexOutOfBoundsException: Index: 0, Size: 0 DBZ-4135

    • Oracle connector throws NPE during streaming in archive only mode DBZ-4140

    • debezium-api and debezium-core jars missing in NIGHTLY Kafka Connect container image libs dir DBZ-4147

    • Trim numerical defaultValue before converting DBZ-4150

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    • Producer failure NullPointerException DBZ-4166

    • DDL Statement couldn’t be parsed DBZ-4170

• When monitoring multiple Connect clusters, the detailed information shown is always for the first cluster, no matter which cluster is selected from the dropdown list DBZ-4181

    • Remove MINUSMINUS operator DBZ-4184

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • Certain LogMiner-specific tests are not being skipped while using Xstreams DBZ-4188

    • Missing debezium/postgres:14-alpine in Docker Hub DBZ-4195

    • nulls for some MySQL properties in the connector-types backend response DBZ-3108

    Other changes

    • Test with new deployment mechanism in AMQ Streams DBZ-1777

    • Incorrect documentation for message.key.columns DBZ-3437

    • Re-enable building PostgreSQL alpine images DBZ-3691

    • Upgrade to Quarkus 2.2.3.Final DBZ-3785

    • Document awareness of Oracle database tuning DBZ-3880

    • Publish website-builder and tooling images once per week DBZ-3907

    • Intermittent test failure on CI - RecordsStreamProducerIT#shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() DBZ-3919

• Please fix vulnerabilities DBZ-3926

    • Error processing binlog event DBZ-3989

    • Upgrade Java version for GH actions DBZ-3993

    • Replace hard-coded version of MySQL example image with getStableVersion() DBZ-4005

    • Handle SCN gap DBZ-4036

    • Upgrade to Apache Kafka 3.0 DBZ-4045

    • Recreate webhook for linking PRs to JIRA issues DBZ-4065

    • Recipient email address should be a variable in all Jenkins jobs DBZ-4071

    • Allow [ci] tag as commit message prefix DBZ-4073

    • Debezium Docker build job fails on rate limiter DBZ-4074

    • Add Postgresql 14 container image (Alpine) DBZ-4075

    • Add Postgresql 14 container image DBZ-4079

    • Fail Docker build scripts on error DBZ-4084

    • Display commit SHA in page footer DBZ-4110

    • Handle large comparisons results from GH API to address missing authors in release workflow DBZ-4112

    • Add debezium-connect-rest-extension module to GH workflows DBZ-4113

    • Display commit SHA in documentation footer DBZ-4123

    • Add Debezium Kafka Connect REST Extension to Debezium Kafka Connect NIGHTLY container image DBZ-4128

    • Migrate from Gitter to Zulip DBZ-4142

    • Postgres module build times out after 6h on CI DBZ-4145

    • Misc. MongoDB connector docs fixes DBZ-4149

    • Document Oracle buffering solutions DBZ-4157

    • Close open file handle DBZ-4164

    • Outreach jobs should test all connectors DBZ-4165

    • Broken link in MySQL docs DBZ-4199

    • Expose outbox event structure at level of Kafka Connect messages DBZ-1297

    \ No newline at end of file + Release Notes for Debezium 1.8

    Release Notes for Debezium 1.8

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.8.1.Final (February 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There were changes to the format of the Infinispan cache to support the storage of certain large object (LOB) events. If you use the Infinispan buffer cache implementation and enabled LOB support, the cache files must be deleted and a new snapshot taken due to these compatibility changes (DBZ-4366).

    The Debezium container images for Apache Kafka and Kafka Connect contain the log4j 1.x library, which is a runtime dependency of Kafka (it is not used in any way by Debezium). Several vulnerabilities were recently reported against some classes contained in that library. While these classes are used by neither Kafka (Connect) nor Debezium, the class files org/apache/log4j/net/JMSSink.class, org/apache/log4j/jdbc/, and /org/apache/log4j/chainsaw/ have been removed from the log4j 1.x JAR shipped with these container images as a measure of caution. If you actually need these classes, you should obtain the original log4j 1.x JAR and add this via custom images you derive from the Debezium ones. We advise against doing this though as per aforementioned vulnerabilities (DBZ-4568).

    New features

    • Mining session stopped due to 'No more data to read from socket' DBZ-4536

    • Unsupported column type 'ROWID' error DBZ-4595

    Fixes

    • MysqlSourceConnector issue with latin1 tables DBZ-3700

    • Oracle Logminer: snapshot→stream switch misses DB changes in ongoing transactions DBZ-4367

    • DDL statement couldn’t be parsed DBZ-4485

    • Extra file checker-qual in PostgreSQL package DBZ-4507

    • Add backend errors among retriable for Postgres connector DBZ-4520

    • LogMinerHelperIT fails when running Oracle CI with a fresh database DBZ-4542

    • Oracle-Connector fails parsing a DDL statement (VIRTUAL keyword) DBZ-4546

    • io.debezium.text.ParsingException when column name is 'seq' DBZ-4553

    • MySQL FLUSH TABLE[S] with empty table list not handled DBZ-4561

    • Oracle built-in schema exclusions should also apply to DDL changes DBZ-4567

    • mongo-source-connector config database.include.list does not work DBZ-4575

    • Incremental snapshots does not honor column case sensitivity DBZ-4584

    • Oracle connector can’t find the SCN DBZ-4597

    • nCaused by: Multiple parsing errors\nio.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira DBZ-4609

    • Parse including keyword column table ddl error DBZ-4640

    • SQL Server ad-hoc snapshot - SnapshotType is case sensitive DBZ-4648

    • DDL parsing issue: ALTER TABLE …​ MODIFY PARTITION …​ DBZ-4649

    • Mark incompatible Xstream tests as LogMiner only DBZ-4650

    • DDL statement couldn’t be parsed mismatched input '`encrypted` DBZ-4661

    • DDL parsing exception DBZ-4675

    Other changes

    • Create downstream documentation for incremental snapshotting DBZ-3457

    • Update downstream docs in regards to deprecated elements DBZ-3881

    • Update connector documentation to reflect new deployment mechanism DBZ-3991

    • Broken links to the Transaction metadata topics from descriptions for provide.transaction.metadata property DBZ-3997

    • Point to supported versions in connector pages DBZ-4300

    • REST extension tests must not depend on source code version DBZ-4466

    • snapshotPreceededBySchemaChange should not be tested for Db2 DBZ-4467

    • PostgresShutdownIT must not depend on Postgres version DBZ-4469

    • Remove INTERNAL_KEY_CONVERTER and INTERNAL_VALUE_CONVERTER env vars DBZ-4514

    • Bump protobuf version to the latest 3.x DBZ-4527

    • Fix links to connector incremental snapshots topic DBZ-4552

    • Doc updates to address downstream build issues DBZ-4563

    • Older degree of parallelism DDL syntax causes parsing exception DBZ-4571

    • Conditionalize note about outbox event router incompatibility DBZ-4573

    • Update description of snapshot.mode in postgresql.adoc DBZ-4574

    • Update shared UG deployment file to clarify that connectors can use existing KC instance DBZ-4582

    • Server transformation properties should refer to "type" rather than "class" DBZ-4613

    • Oracle DDL parser failure with supplemental log group clause with a custom name DBZ-4654

    • Upgrade postgres driver to version 42.3.2 DBZ-4658

    • Make sure right protoc version is applied DBZ-4668

    • MongoUtilIT test failure - unable to connect to primary DBZ-4676

    • Update shared UG deployment file for use with downstream OCP Install Guide DBZ-4700

    • Indicate ROWID is not supported by XStream DBZ-4702

    Release 1.8.0.Final (December 16th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The incubating SMT for extracting the new document state from MongoDB (link to https://debezium.io/documentation/reference/stable/transformations/mongodb-event-flattening.html) change events propagates the document’s id within a field named _id within the change event value (DBZ-4413). This is to avoid collisions with existing other document fields with the previously used name id.

    New Features

    • Allow to configure custom Hibernate user types for Quarkus outbox extension DBZ-3552

    • Create a Debezium schema generator for Debezium connectors (initial work) DBZ-4393

    Fixes

    • Outbox Event Router not working in Oracle Connector DBZ-3940

    • some data type is not working for sending signals to a Debezium connector DBZ-4298

    • Debezium UI - Connector create fails if topic group defaults not specified DBZ-4378

    Other changes

    • Intermittent test failure: SqlServerChangeTableSetIT#readHistoryAfterRestart() DBZ-3306

    • Upgrade to Apicurio Registry 2.0 (QE, docs) DBZ-3629

    • Oracle upstream tests in internal CI DBZ-4185

    • Document MongoDB source format DBZ-4420

    • Missing log message for snapshot.locking.mode = none DBZ-4426

    • Caching not working in formatting job DBZ-4429

    • Optionally assemble Oracle connector distribution without Infinispan DBZ-4446

    • Simplify the implementation of method duration in debezium/util/Strings.java DBZ-4423

    • Exclude log4j from Debezium Server distribution in 1.8 DBZ-4452

    Release 1.8.0.CR1 (December 9th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Debezium connector for Vitess exposes a new attribute, "keyspace", in the "source" block of emitted change events (DBZ-4412).

    Vitess connector: The mapping for UINT64 columns has been changed from INT64 to String, so as to avoid a value overflow for values larger than 2^63 (DBZ-4403).

    The source block structure of change events emitted by the incubating Debezium connector for Vitess has been adjusted (DBZ-4428):

    • The obsolete attribute "schema" has been removed

    • The attribute "db" is now always empty; the keyspace from which a change event originated, can be retrieved via the new "keyspace" attribute

    New Features

    • Implement incremental snapshotting for MongoDB DBZ-3342

    • Add schema descriptors for the UI JSON Schema for SQL Server Connector DBZ-3697

    • Optionally add OPTION(RECOMPILE) to incremental snapshot queries DBZ-4249

    • Log count of changed records sent DBZ-4341

    • Add support for truncate in oracle connector DBZ-4385

    • Support binary_handling_mode for Oracle connector DBZ-4400

    • Enforce consistent vgtid representation in vitess connector DBZ-4409

    Fixes

    • Parallel write can be lost during catch-up phase DBZ-2792

    • None of log files contains offset SCN (SCN offset is no longer available in the online redo logs) DBZ-3635

    • [Debezium Server] Event Hubs exporter slow/Event data was too large DBZ-4277

    • NullPointer exception on Final stage of snapshot for Oracle connector DBZ-4376

    • Oracle pipeline matrix docker conflict DBZ-4377

    • System testsuite unable to pull apicurio operator from quay DBZ-4382

    • Oracle DDL Parser Error DBZ-4388

    • DDL couldn’t be parsed: 'analyze table schema.table estimate statistics sample 5 percent;' DBZ-4396

    • MySQL: DDL Statement could not be parsed 'GRANT' DBZ-4397

    • Support keyword CHAR SET for defining charset options DBZ-4402

    • Xstream support with LOB unavailable value placeholder support is inconsistent DBZ-4422

    • Oracle Infinispan buffer fails to serialize unavailable value placeholders DBZ-4425

    • VStream gRPC connection closed after being idle for a few minutes DBZ-4389

    Other changes

    • Oracle testing in system-level testsuite DBZ-3963

    • Upgrade to Quarkus 2.5.0.Final DBZ-4035

    • Document incremental chunk size setting DBZ-4127

    • Complete CDC implementation based on MongoDB Change Streams DBZ-4205

    • Record video demo showing Kafka topics creation and transformation UIs DBZ-4260

    • Add Oracle 12.2.0.1 to internal CI Oracle job DBZ-4322

    • OracleClobDataTypeIT shouldNotStreamAnyChangesWhenLobEraseIsDetected may fail randomly DBZ-4384

    • Upgrade impsort-maven-plugin from 1.6.0 to 1.6.2 DBZ-4386

    • Upgrade formatter-maven-plugin from 2.15.0 to 2.16.0 DBZ-4387

    • Unstable test for online DDL changes DBZ-4391

    • Create Debezium Kafka Connect REST Extension DBZ-4028

    Release 1.8.0.Beta1 (November 30th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 3.0.0 and has been tested with version 3.0.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Vitess connector no longer depends on vtctld (DBZ-4324).

    The Oracle connector inserts a placeholder value for unavailable CLOB/BLOB columns (DBZ-4276).
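    A sink-side consumer might treat that placeholder as "keep the value you already have". The sketch below is only an assumption about how one could handle it; the placeholder string shown is the commonly documented default, but treat it as an assumption here, since the connector allows it to be configured.

        # Hedged sketch: skip columns whose value is the "unavailable" placeholder.
        UNAVAILABLE_PLACEHOLDER = "__debezium_unavailable_value"   # assumed default

        def merge_row(existing_row: dict, change_event_after: dict) -> dict:
            """Apply an update, but keep the previously stored LOB when only the placeholder arrived."""
            merged = dict(existing_row)
            for column, value in change_event_after.items():
                if value == UNAVAILABLE_PLACEHOLDER:
                    continue   # LOB value was not re-emitted; keep what the sink already has
                merged[column] = value
            return merged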

    New Features

    • Support pg_logical_emit_message DBZ-2363

    • Outbox Event Router for MongoDB DBZ-3528

    • Improve interval type support in Oracle DBZ-1539

    • money data type should be controlled by decimal.handling.mode DBZ-1931

    • Support for Multiple Databases per SQL Server Connector DBZ-2975

    • Debezium server stops with wrong exit code (0) DBZ-3570

    • Change Debezium UI configurations property names DBZ-4066

    • Extend configuration support for Infinispan caches DBZ-4169

    • Support schema changes during incremental snapshot DBZ-4196

    • Handle login failure during instance upgrade as retriable DBZ-4285

    • Modify the type of aggregateid in MongoDB Outbox Event Router DBZ-4318

    • Make the MS SQL materialized view limitation explicit DBZ-4330

    Fixes

    • PostgresConnector does not allow a numeric slot name DBZ-1042

    • False empty schema warning for snapshot mode never DBZ-1344

    • Tutorial incorrectly shows "op": "c" for initial change events DBZ-3786

    • SQL Server fails to read CDC events if there is a schema change ahead DBZ-3992

    • Once the user clicks the "Review and finish" button, that step’s link is not enabled in the wizard side menu. DBZ-4119

    • DDL statement couldn’t be parsed DBZ-4224

    • The lastOffset variable in MySqlStreamingChangeEventSource is always null DBZ-4225

    • Unknown entity: io.debezium.outbox.quarkus.internal.OutboxEvent DBZ-4232

    • Signal based incremental snapshot is failing when launched right after a schema change DBZ-4272

    • SQL Server connector doesn’t handle multiple capture instances for the same table with equal start LSN DBZ-4273

    • Debezium UI - some issues with browser support for replaceAll DBZ-4274

    • AbstractDatabaseHistory.java has typo DBZ-4275

    • OracleConnectorIT - two tests fail when using Xstream DBZ-4279

    • ParsingException: DDL statement couldn’t be parsed DBZ-4280

    • Topic Group UI step does not refresh correctly after setting properties DBZ-4293

    • Add MariaDB specific username for MySQL parser DBZ-4304

    • NullPointerException may be thrown when validating table and column lengths DBZ-4308

    • RelationalChangeRecordEmitter calls "LoggerFactory.getLogger(getClass())" for each instance of the emitter DBZ-4309

    • support for JSON function in MySQL index DBZ-4320

    • Avoid holding table metadata lock in read-only incremental snapshots DBZ-4331

    • Convert mysql time type default value error DBZ-4334

    • Wrong configuration option name for MongoDB Outbox SMT DBZ-4337

    • Incremental Snapshot does not pick up table DBZ-4343

    • Oracle connector - Cannot parse column default value 'NULL ' to type '2' DBZ-4360

    Other changes

    • Add canonical URL links to older doc versions DBZ-3897

    • Set up testing job for MongoDB 5.0 DBZ-3938

    • Misc. documentation changes for the Debezium MySQL connector DBZ-3974

    • Promote Outbox SMT to GA DBZ-4012

    • Test failure: SchemaHistoryTopicIT::schemaChangeAfterSnapshot() DBZ-4082

    • Jenkins job for creating image snapshot used by new Jenkins nodes DBZ-4122

    • Use SMT/Transformation UI backend endpoint DBZ-4146

    • Create GH Action for tearing down abandoned website preview environments DBZ-4214

    • Unify Memory and Infinispan event processor implementations DBZ-4236

    • Update system-level testsuite CI job DBZ-4267

    • Upgrade MySQL JDBC driver to 8.0.27 DBZ-4286

    • Only build debezium-core and dependencies in cross-repo builds DBZ-4289

    • Reduce log verbosity DBZ-4291

    • Vitess connector should expose vstream flags DBZ-4295

    • Vitess connector should allow client to config starting VGTID DBZ-4297

    • Layout glitch on docs landing page DBZ-4299

    • Provide outbox routing example for MongoDB DBZ-4302

    • Fix wrong option names in examples DBZ-4311

    • Update functional test CI to work with downstream source archive DBZ-4316

    • Provide example showing usage of remote Infinispan cache DBZ-4326

    • Provide CI for MongoDB 4.4 DBZ-4327

    • Test case for schema migration in Vitess connector DBZ-4353

    • Enable transaction metadata for vitess connector DBZ-4355

    • io.debezium.data.VerifyRecord.isValid(SourceRecord) is a no-op DBZ-4364

    • SignalsIT times out after 6h on CI DBZ-4370

    • Document incremental chunk size setting DBZ-4127

    Release 1.8.0.Alpha2 (November 11th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Previous versions stored buffered events in an Infinispan cache as part of the transaction data, which could lead to memory issues when that data was loaded. This version keeps transaction metadata and buffered events in separate caches, so event data is no longer loaded during transaction metadata processing (DBZ-4159).
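    Conceptually, the change looks like the following sketch (plain Python dictionaries standing in for the Infinispan caches; this is not Debezium’s actual internal code):

        # Transaction metadata and buffered events live in separate stores, so walking
        # the metadata no longer pulls every buffered event payload into memory.
        transaction_metadata = {}   # tx_id -> {"start_scn": ..., "event_count": ...}
        buffered_events = {}        # (tx_id, sequence) -> raw event payload

        def buffer_event(tx_id, payload, scn):
            meta = transaction_metadata.setdefault(tx_id, {"start_scn": scn, "event_count": 0})
            buffered_events[(tx_id, meta["event_count"])] = payload   # payload stays out of the metadata cache
            meta["event_count"] += 1

        def commit(tx_id):
            count = transaction_metadata.pop(tx_id, {}).get("event_count", 0)
            return [buffered_events.pop((tx_id, i)) for i in range(count)]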

    New Features

    • TableChangesSerializer ignored defaultValue and enumValues DBZ-3966

    • Support for heartbeat action queries for MySQL DBZ-4029

    • Expose the transaction topic name as a config DBZ-4077

    • Improvement to the topic creation step DBZ-4172

    • Process transaction started/committed in MySQL read-only incremental snapshot DBZ-4197

    • Ability to use base image from authenticated registry with KC build mechanism DBZ-4227

    • Remove SqlServerConnector database.user Required Validator DBZ-4231

    • Specify database host name as 0.0.0.0 for Oracle connector tests CI DBZ-4242

    • Support all charsets in MySQL parser DBZ-4261

    Fixes

    • "table" is null for table.include.list and column.include.list DBZ-3611

    • Debezium server crashes when deleting a record from a SQLServer table (redis sink) DBZ-3708

    • Invalid default value error on captured table DDL with default value DBZ-3710

    • Incremental snapshot doesn’t work without primary key DBZ-4107

    • Error: PostgresDefaultValueConverter - Cannot parse column default value 'NULL::numeric' to type 'numeric'. Expression evaluation is not supported. DBZ-4137

    • Container images for Apache Kafka and ZooKeeper fail to start up DBZ-4160

    • Debezium 1.7 image disables insecure algorithms, breaking unpatched databases DBZ-4167

    • DDL statement couldn’t be parsed - Modify Column DBZ-4174

    • DML statement couldn’t be parsed DBZ-4194

    • Debezium log miner processes get terminated with ORA-04030 error in idle database environment. DBZ-4204

    • DDL with Oracle SDO_GEOMETRY cannot be parsed DBZ-4206

    • DDL with Oracle sequence as default for primary key fails schema generation DBZ-4208

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement 'DROP TABLE IF EXISTS condition' DBZ-4210

    • Support MySQL Dual Passwords in DDL Parser DBZ-4215

    • Debezium Metrics not being set correctly DBZ-4222

    • CREATE PROCEDURE DDL throws ParsingException DBZ-4229

    • Exception ORA-00310 is not gracefully handled during streaming DBZ-4230

    • CHAR / NCHAR precision is not correctly derived from DDL statements DBZ-4233

    • Oracle connector parses NUMBER(*,0) as NUMBER(0,0) in DDL DBZ-4240

    • Signal based incremental snapshot is failing if database name contains dash DBZ-4244

    • SQL Server connector doesn’t handle retriable errors during database state transitions DBZ-4245

    • Does Debezium support database using charset GB18030? DBZ-4246

    • Broken anchors in Debezium Documentation DBZ-4254

    • Reduce verbosity of logging Oracle memory metrics DBZ-4255

    • When Debezium executes select * in the snapshot phase, it does not catch the SQL exception, resulting in confusing exceptions and logs DBZ-4257

    Other changes

    • Rename "master" branches to "main" for remaining repos DBZ-3626

    • Support Oracle Logminer docker image in system level test-suite DBZ-3929

    • Missing documentation for max.iteration.transactions option DBZ-4129

    • Use topic auto-creation UI backend endpoint DBZ-4148

    • Remove superfluous build triggers DBZ-4200

    • Tag debezium/tooling:1.2 version DBZ-4238

    • Rework MySqlTimestampColumnIT test DBZ-4241

    • Remove unused code DBZ-4252

    • Optimize tooling image DBZ-4258

    • Change DB2 image in testsuite to use private registry DBZ-4268

    Release 1.8.0.Alpha1 (October 27th, 2021)

    Kafka compatibility

    This release has been built against Kafka Connect 2.8.0 and has been tested with version 2.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.8.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.8.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.8.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our Docker images, then please do not forget to pull them fresh from the Docker registry.

    Breaking changes

    There are no breaking changes in this release.

    New Features

    • Provide MongoDB CDC implementation based on 4.0 change streams DBZ-435

    • No option fullDocument for the connection to MongoDB oplog.rs DBZ-1847

    • Make antora playbook_author.yml use current branch DBZ-2546

    • Support Kerberos for Debezium MS SQL plugin DBZ-3517

    • Make "snapshot.include.collection.list" case insensitive like "table.include.list" DBZ-3895

    • Exclude usernames at transaction level DBZ-3978

    • [oracle] Add the SCHEMA_ONLY_RECOVERY snapshot mode DBZ-3986

    • Support parsing table and column comments DBZ-4000

    • Upgrade postgres JDBC driver to version 42.2.24 DBZ-4046

    • Support JSON logging formatting DBZ-4114

    • Upgrade mysql-binlog-connector-java to v0.25.4 DBZ-4152

    • Wrong class name in SMT predicates documentation DBZ-4153

    • Log warning when table/column name exceeds maximum allowed by LogMiner DBZ-4161

    • Add Redis to debezium-server-architecture.png DBZ-4190

    • Wrong variable naming in a unit test for the Outbox Event Router SMT DBZ-4191

    • MongoDB connector support user defined topic delimiter DBZ-4192

    • Parse the "window" keyword for agg and nonagg function in mysql8 DBZ-4193

    • Wrong field in the change event message example in the MongoDB connector documentation DBZ-4201

    • Add a backend service for UI to fetch the SMT and topic auto-creation configuration properties DBZ-3874

    Fixes

    • Debezium build is unstable for Oracle connector DBZ-3807

    • Row hashing in LogMiner Query not able to differentiate between rows of a statement. DBZ-3834

    • The chunk select statement is incorrect for combined primary key in incremental snapshot DBZ-3860

    • Crash processing MariaDB DATETIME fields returns empty blob instead of null (Snapshotting with useCursorFetch option) DBZ-4032

    • column.mask.hash.hashAlgorithm.with…: data corruption occurs when using this feature DBZ-4033

    • Compilation of MySQL grammar displays warnings DBZ-4034

    • Infinispan SPI throws NPE with more than one connector configured to the same Oracle database DBZ-4064

    • Extra double quotes on Kafka message produced by Quarkus Outbox Extension DBZ-4068

    • Debezium Server might contain driver versions pulled from Quarkus DBZ-4070

    • Connection failure while reading chunk during incremental snapshot DBZ-4078

    • Postgres 12/13 images are not buildable DBZ-4080

    • Postgres testsuite hangs on PostgresConnectorIT#exportedSnapshotShouldNotSkipRecordOfParallelTx DBZ-4081

    • CloudEventsConverter omits payload data of deleted documents DBZ-4083

    • Database history is constantly being reconfigured DBZ-4106

    • projectId not being set when injecting a custom PublisherBuilder DBZ-4111

    • Oracle flush table should not contain multiple rows DBZ-4118

    • Can’t parse DDL for View DBZ-4121

    • SQL Server Connector fails to wrap in flat brackets DBZ-4125

    • Oracle Connector DDL Parsing Exception DBZ-4126

    • Debezium throws IndexOutOfBoundsException: Index: 0, Size: 0 when dealing with Oracle DDL DBZ-4135

    • Oracle connector throws NPE during streaming in archive only mode DBZ-4140

    • debezium-api and debezium-core jars missing in NIGHTLY Kafka Connect container image libs dir DBZ-4147

    • Trim numerical defaultValue before converting DBZ-4150

    • Possible OutOfMemoryError with tracking schema changes DBZ-4151

    • DDL ParsingException - not all table compression modes are supported DBZ-4158

    • Producer failure NullPointerException DBZ-4166

    • DDL Statement couldn’t be parsed DBZ-4170

    • When monitoring multiple Connect clusters, the detailed information is always for the first cluster, regardless of which cluster is selected from the dropdown list. DBZ-4181

    • Remove MINUSMINUS operator DBZ-4184

    • OracleSchemaMigrationIT#shouldNotEmitDdlEventsForNonTableObjects fails for Xstream DBZ-4186

    • Certain LogMiner-specific tests are not being skipped while using Xstreams DBZ-4188

    • Missing debezium/postgres:14-alpine in Docker Hub DBZ-4195

    • nulls for some MySQL properties in the connector-types backend response DBZ-3108

    Other changes

    • Test with new deployment mechanism in AMQ Streams DBZ-1777

    • Incorrect documentation for message.key.columns DBZ-3437

    • Re-enable building PostgreSQL alpine images DBZ-3691

    • Upgrade to Quarkus 2.2.3.Final DBZ-3785

    • Document awareness of Oracle database tuning DBZ-3880

    • Publish website-builder and tooling images once per week DBZ-3907

    • Intermittent test failure on CI - RecordsStreamProducerIT#shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() DBZ-3919

    • Please fix vulnerabilities DBZ-3926

    • Error processing binlog event DBZ-3989

    • Upgrade Java version for GH actions DBZ-3993

    • Replace hard-coded version of MySQL example image with getStableVersion() DBZ-4005

    • Handle SCN gap DBZ-4036

    • Upgrade to Apache Kafka 3.0 DBZ-4045

    • Recreate webhook for linking PRs to JIRA issues DBZ-4065

    • Recipient email address should be a variable in all Jenkins jobs DBZ-4071

    • Allow [ci] tag as commit message prefix DBZ-4073

    • Debezium Docker build job fails on rate limiter DBZ-4074

    • Add Postgresql 14 container image (Alpine) DBZ-4075

    • Add Postgresql 14 container image DBZ-4079

    • Fail Docker build scripts on error DBZ-4084

    • Display commit SHA in page footer DBZ-4110

    • Handle large comparisons results from GH API to address missing authors in release workflow DBZ-4112

    • Add debezium-connect-rest-extension module to GH workflows DBZ-4113

    • Display commit SHA in documentation footer DBZ-4123

    • Add Debezium Kafka Connect REST Extension to Debezium Kafka Connect NIGHTLY container image DBZ-4128

    • Migrate from Gitter to Zulip DBZ-4142

    • Postgres module build times out after 6h on CI DBZ-4145

    • Misc. MongoDB connector docs fixes DBZ-4149

    • Document Oracle buffering solutions DBZ-4157

    • Close open file handle DBZ-4164

    • Outreach jobs should test all connectors DBZ-4165

    • Broken link in MySQL docs DBZ-4199

    • Expose outbox event structure at level of Kafka Connect messages DBZ-1297

    Debezium Release Series 1.9

    stable

    Tested Versions

    Java 11+
    Kafka Connect 1.x, 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.28
    MongoDB Database: 3.2, 3.4, 3.6, 4.0, 4.2, 4.4, 5.0
    Driver: 4.3.3
    PostgreSQL Database: 10, 11, 12, 13, 14
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.3.5
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0
    SQL Server Database: 2017, 2019
    Driver: 9.4.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.


    Release Notes for Debezium 1.9

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.9.8.Final (December 15th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.8.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.8.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.8.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Postgres existing publication is not updated with the new table DBZ-3921

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Restart SQL Server task on "Cannot continue the execution because the session is in the kill state" exception DBZ-5777

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Support logical decoding from Postgres 16 stand-bys DBZ-7181

    Fixes

    • MongoConnector’s field exclusion configuration does not work with fields with the same name but from different collections DBZ-4846

    • ORA-01003: no statement parsed DBZ-5352

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • No table filters found for filtered publication DBZ-5949

    Other changes

    • Review tutorial README for configuring Debezium to use Avro serialization on Red Hat OpenShift Service Registry DBZ-4616

    Release 1.9.7.Final (October 25th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.7.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.7.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.7.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • MySqlErrorHandler should handle SocketException DBZ-5486

    Fixes

    • ORA-01289: cannot add duplicate logfile DBZ-5276

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • Missing snapshot pending transactions DBZ-5482

    • Outbox pattern nested payload leads to connector crash DBZ-5654

    • Keyword virtual can be used as an identifier DBZ-5674

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    • Function DATE_ADD can be used as an identifier DBZ-5679

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • MySqlConnector parse create view statement failed DBZ-5708

    • Debezium Server 1.9.6 is using MSSQL JDBC 7.2.2 instead of 9.4.1 DBZ-5711

    • Vitess: Handle Vstream error: unexpected server EOF DBZ-5722

    • ParsingException: DDL statement couldn’t be parsed (index hints) DBZ-5724

    • Oracle SQL parsing error when collation used DBZ-5726

    • Unparseable DDL statement DBZ-5734

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    Other changes

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Remove downstream TP designation for RAC content in Oracle connector docs DBZ-5735

    • Update Pulsar client to 2.10.1 DBZ-5737

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    Release 1.9.6.Final (September 23rd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.6.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.6.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.6.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Read Debezium Metrics From Debezium Server Consumer DBZ-5235

    • Treat SQLServerException with "Broken pipe (Write failed)" exception message as a retriable exception DBZ-5292

    • Add INITIAL_ONLY to Db2 snapshot mode DBZ-5429

    • Unsupported non-relational tables should be gracefully skipped by the connector during streaming DBZ-5441

    • Restart SQL Server task on "Socket closed" exception DBZ-5478

    • Handle Vstream Connection reset DBZ-5551

    • Improve documentation editing experience by setting attributes for the preview DBZ-5576

    • Traditional snapshot process setting source.ts_ms DBZ-5591

    • Support for setting stats_sample_pages=default in alter table statements DBZ-5631

    • support for using any expression in kill statements DBZ-5636

    Fixes

    • System test-suite instabilities in CI DBZ-3862

    • Source info of incremental snapshot events exports wrong data DBZ-4329

    • Redis Sink config properties are not passed to DB history DBZ-5035

    • Memory leak in EventDeserializer caused by tableMapEventByTableId DBZ-5126

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • HTTP sink not retrying failing requests DBZ-5307

    • Caused by: java.io.EOFException: Failed to read next byte from position 2005308603 DBZ-5333

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • Debezium Cassandra 4 Connector not working with 1.9.4 release BUT works with 1.9.2 release DBZ-5380

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Missing "previousId" property when parsing the rename statement in the Kafka history topic DBZ-5386

    • Check constraint introduces a column based on constraint in the schema change event. DBZ-5390

    • Clarify which database name to use for signal.data.collection when using Oracle with pluggable database support DBZ-5399

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • Upgrade to Kafka 3.1 broke build compatibility with Kafka 2.x and Kafka 3.0 DBZ-5404

    • PostgresConnectorIT#shouldRecoverFromRetriableException fails randomly DBZ-5408

    • OracleConnectorIT waitForCurrentScnToHaveBeenSeenByConnector method can produce a NumberFormatException DBZ-5428

    • OffsetStore not stopped if it fails to fully start DBZ-5433

    • Translation from mongodb document to kafka connect schema fails when nested arrays contain no elements DBZ-5434

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Typo in sqlserver document. DBZ-5440

    • Typo in postgresql document. DBZ-5450

    • Create Index DDL fails to parse when using TABLESPACE clause with quoted identifier DBZ-5472

    • Outbox doesn’t check array consistency properly when it determines its schema DBZ-5475

    • Misleading statistics written to the log DBZ-5476

    • Debezium connector task didn’t retry when failover in mongodb 5 DBZ-5479

    • Oracle DATADUMP DDL cannot be parsed DBZ-5488

    • MySQL connector fails to parse DDL statements that include the keyword "buckets" DBZ-5499

    • duplicate call to config.validateAndRecord() in RedisDatabaseHistory DBZ-5506

    • DDL statement couldn’t be parsed : mismatched input 'ENGINE' DBZ-5508

    • LogMiner DML parser incorrectly interprets concatenation operator inside quoted column value DBZ-5521

    • Mysql Connector DDL Parser does not parse all privileges DBZ-5522

    • SQL Server random test failures - EventProcessingFailureHandlingIT DBZ-5525

    • CREATE TABLE with JSON-based CHECK constraint clause causes MultipleParsingExceptions DBZ-5526

    • SQL Server test failure - verifyOffsets DBZ-5527

    • Support EMPTY column identifier DBZ-5550

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • max.queue.size.in.bytes is invalid DBZ-5569

    • Vitess: Handle VStream close unexpectedly DBZ-5579

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Field validation errors are misleading for positive, non-zero expectations DBZ-5588

    • LIST_VALUE_CLAUSE not allowing TIMESTAMP LITERAL DBZ-5592

    • Oracle DDL does not support comments on materialized views DBZ-5595

    • Message with LSN foo larger than expected LSN bar DBZ-5597

    • Oracle DDL does not support DEFAULT ON NULL DBZ-5605

    • Datatype mdsys.sdo_geometry not supported DBZ-5609

    • MySQL connector cannot parse default value of decimal column enclosed in double quotes DBZ-5630

    • Continuously WARNs about undo transactions when LOB is enabled DBZ-5635

    • Literal "${project.version}" in the source record instead of the actual version DBZ-5640

    • TABLE_TYPE keyword can be used as identifier DBZ-5643

    • Large numbers of ROLLBACK transactions can lead to memory leak when LOB is not enabled. DBZ-5645

    Other changes

    • Clean-up unused documentation variables DBZ-2595

    • Oracle SCAN VIP support DBZ-3987

    • Intermittent test failures on CI: EventProcessingFailureHandlingIT DBZ-4004

    • Downstream test for outbox event routing SMT DBZ-4266

    • No documentation for snapshot.include.collection.list property for Db2 connector DBZ-4345

    • Improve Filter SMT documentation / examples DBZ-4417

    • Update instructions for deploying Debezium on RHEL (downstream-only change) DBZ-5293

    • Build stable branches for connector-specific repos DBZ-5409

    • Address User guide review comments for Oracle connector DBZ-5418

    • OracleSchemaMigrationIT fails on non-pluggable (non-CDB) databases DBZ-5419

    • Update link format in shared tutorial file DBZ-5422

    • Remove community conditionalization in signaling doc for Oracle incremental and ad hoc snapshots content DBZ-5458

    • MySQL read.only property incorrectly appears in downstream documentation DBZ-5555

    • Upgrade binary log client to 0.27.2 DBZ-5620

    Release 1.9.5.Final (July 8th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Correct documentation of Event Router in expand.json mode DBZ-5296

    • Allow Maven versions greater than the current 3.8.4 DBZ-5299

    Fixes

    • Data duplication problem using postgresql source on debezium server DBZ-5070

    • Duplicate SCNs on Oracle RAC installations incorrectly processed DBZ-5245

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

    • Debezium server fail when connect to Azure Event Hubs DBZ-5279

    • ORA-01086 savepoint never established raised when database history topic cannot be created or does not exist DBZ-5281

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    • Snapshot fails when table’s relational model is created using an abstract data type as unique index DBZ-5300

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    Other changes

    • Document use of JAR artifact to build Debezium scripting SMT into Kafka Connect DBZ-5227

    • Build Oracle connector by default without Maven profiles DBZ-5234

    • Remove reference to removed case insensitive option in Oracle README.md DBZ-5250

    • LogMinerHelperIT tests fail when executed against a multi-node Oracle RAC cluster DBZ-5301

    • Support skipping tests based on whether V$OPTION is enabled or disabled DBZ-5303

    • Upgrade to Apache Kafka 3.2.0 DBZ-5346

    • Oracle GitHub actions workflow no longer run tests on pushes DBZ-5349

    Release 1.9.4.Final (June 21st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Include event scn in Oracle records DBZ-5225

    • Redis Store does not work with GCP Managed Redis DBZ-5268

    Fixes

    • Incorrect loading of LSN from offsets DBZ-3942

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • BigDecimal has mismatching scale value for given Decimal schema DBZ-4890

    • Debezium has never found starting LSN DBZ-5031

    • Cursor fetch is used for all results during connection DBZ-5084

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5200

    • Debezium does NOT support unix_timestamp() as DEFAULT value DBZ-5201

    • Oracle io.debezium.DebeziumException: io.debezium.DebeziumException: Failed to get transaction id for current SCN DBZ-5202

    • Debezium Postgres v1.9.3 fails in Materialize CI DBZ-5204

    • Oracle Connector failing due to ALTER TABLE for adding column with foreign key DBZ-5210

    • DDL statement couldn’t be parsed - Oracle connector 1.9.3.Final DBZ-5211

    • DDL statement couldn’t be parsed 2 - Oracle connector 1.9.3.Final DBZ-5230

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle unparsable ddl create table DBZ-5237

    • Character set influencers are not properly parsed on default values DBZ-5241

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5271

    • Deadlock during snapshot with Mongo connector DBZ-5272

    • Mysql parser is not able to handle variables in KILL command DBZ-5273

    Other changes

    • Confusing example for schema change topic DBZ-4713

    • Update cache-invalidation example DBZ-4754

    • MBean name registrations no longer correct in documentation DBZ-5153

    • Use ubi9 as the base image for Debezium UI DBZ-5199

    • Restore deleted topic heading in mongodb-outbox-event-router.adoc DBZ-5219

    • Create shared adoc fragments for specifying MBean name format in connector metrics sections DBZ-5233

    • Several Oracle tests do not get database name from TestHelper DBZ-5258

    Release 1.9.3.Final (June 2nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Allow mongodb-connector to decode Binary payloads DBZ-4600

    • ORA-04030: out of process memory when trying to allocate 65568 bytes (Logminer LCR c,krvxrib:buffer) DBZ-4963

    • Include heartbeat table to the heartbeat process in the Debezium Oracle Connector DBZ-5119

    • Avoid reading entire schema history file into memory in the test suite DBZ-5129

    • Expose more useful exception info with building the field default value schema DBZ-5172

    Fixes

    • Error and connector stops when DDL contains lateral DBZ-4780

    • Schema changes should flush SCN to offsets if there are no other active transactions DBZ-4782

    • Connector stops streaming after a re-balance DBZ-4792

    • MySQL connector incremental snapshot fails to parse datetime column length when the connector sets "snapshot.fetch.size": 20000 DBZ-4939

    • [MySQL Debezium] DDL Parsing error - CREATE OR REPLACE TABLE DBZ-4958

    • InstanceAlreadyExistsException during MongoDb connector metrics registration DBZ-5011

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • Fix inconsistent transaction id when handling transactional messages in Vitess connector DBZ-5063

    • Debezium MCS Error when changing Postgres port DBZ-5067

    • 4 Connections per connector (postgres) DBZ-5074

    • Oracle documentation refers to archive_log_target rather than archive_lag_target DBZ-5076

    • 'ALTER TABLE mytable DROP FOREIGN KEY IF EXISTS mytable_fk' no viable alternative at input 'ALTER TABLE mytable DROP FOREIGN KEY IF' DBZ-5077

    • Oracle Logminer: records missed during switch from snapshot to streaming mode DBZ-5085

    • Interrupting a snapshot process can hang for some JDBC drivers DBZ-5087

    • Debezium fails to undo change event due to transaction id ending in ffffffff with LogMiner DBZ-5090

    • Postgresql connector does not retry one some errors when postgres is taken offline DBZ-5097

    • Parsing zero day fails DBZ-5099

    • Cannot Set debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm to empty value DBZ-5105

    • Debezium connector failed with create table statement DBZ-5108

    • Current version of surefire/failsafe skips tests on failure in BeforeAll DBZ-5112

    • Test IncrementalSnapshotIT##schemaChanges fails randomly DBZ-5131

    • Cannot parse default value 0.000000000000000000 for bigint column DBZ-5134

    • MilliSecondsBehindSource is not reported by SQL Server connector DBZ-5137

    • Restarting mysql connector task fails with: java.lang.RuntimeException: Unable to register the MBean DBZ-5138

    • No raising of "WARN Event for transaction X has already been processed, skipped." DBZ-5140

    • Oracle connector restarts after ORA-01291 DBZ-5148

    • TestContainers method DebeziumContainer#getConnectorTaskState can raise a NullPointerException DBZ-5159

    • ExtractNewRecordState SMT Replaces Null Value with Column’s Default Value DBZ-5166

    • Oracle connector metrics tracking of rollback and abandoned transactions may cause high memory usage DBZ-5179

    • Debezium official documentation typo DBZ-5040

    • Should sleep for REGISTRATION_RETRY_DELAY when MBean registration fails DBZ-5141

    Other changes

    • Restructure documentation for custom converters DBZ-4588

    • Document xmin.fetch.interval.ms property for Postgres connector DBZ-4734

    • Add FAQ about ORA-01882 and Oracle 11 to documentation DBZ-5057

    • Rename "Mysql" to "MySql" in related MysqlFieldReader interface DBZ-5078

    • Remove auto-generation and default values for MySQL database.server.id DBZ-5101

    • Upgrade Jackson Databind to 2.13.2.2 DBZ-5107

    • Switch to released version of Fixture5 extension in System testsuite DBZ-5114

    • Use range to activate jdk11 profile DBZ-5118

    • Misc edits to prepare Oracle connector docs for GA DBZ-5132

    • Pro-actively detect issues with LogMiner records DBZ-5147

    • Align Postgresql driver with Quarkus DBZ-5060

    Release 1.9.2.Final (April 29th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • adjust LogMiner batch size based on comparison with currently used batch size DBZ-5005

    Fixes

    • Connector throws java.lang.ArrayIndexOutOfBoundsException DBZ-3848

    • Document no relevant tables should be in the SYS or SYSTEM tablespaces. DBZ-4762

    • Unable to mine Oracle source table which have "/" in table names DBZ-5006

    • SQL Server in multi-partition mode fails if a new database is added to an existing configuration DBZ-5033

    • Debezium Server tarball 1.9.1 does not work DBZ-5037

    • MySQL tests start before the MySQL DB container is running DBZ-5054

    • Debezium server configuration properties not rendered correctly DBZ-5058

    Other changes

    There are no other changes in this release.

    Release 1.9.1.Final (April 21st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Extract component preparation from test-suite job DBZ-4601

    • Making Postgres PSQLException: This connection has been closed. retriable DBZ-4948

    Fixes

    • Simplify and clean up system testsuite job DBZ-4570

    • Getting java.sql.SQLException: ORA-01291: missing logfile while running with archive log only DBZ-4879

    • Debezium uses wrong LCR format for Oracle 12.1 DBZ-4932

    • Oracle duplicates on connector restart DBZ-4936

    • Oracle truncate causes exception DBZ-4953

    • NPE caused by io.debezium.connector.oracle.antlr.listener.ColumnDefinitionParserListener.resolveColumnDataType DBZ-4976

    • Oracle connector may throw NullPointerException when stopped after an unsuccessful startup DBZ-4978

    • NPE for non-table related DDLs DBZ-4979

    • CTE statements aren’t parsed by MySQL connector DBZ-4980

    • Unsupported MySQL Charsets during Snapshotting for fields with custom converter DBZ-4983

    • Outbox Transform does not allow expanded payload with additional fields in the envelope DBZ-4989

    • Redis Sink - clientSetname is taking place before auth DBZ-4993

    • CLOB with single quotes causes parser exception DBZ-4994

    • Oracle DDL parser fails on references_clause with no column list DBZ-4996

    • Can’t use 'local' database through mongos DBZ-5003

    • Triggering Incremental Snapshot on MongoDB connector throws json parsing error DBZ-5015

    • Redis Sink - Check if client is not null before closing it DBZ-5019

    Other changes

    • QE jenkins jobs consolidation DBZ-4235

    • Create trigger job for connector jobs DBZ-4558

    • Debezium UI dependency updates DBZ-4881

    • Read-only incremental snapshots blog post DBZ-4917

    • Update Pulsar client version used by Debezium Server DBZ-4961

    • Intermittent failure of RedisStreamIT.testRedisConnectionRetry DBZ-4966

    • Debezium raised an exception and the task was still running DBZ-4987

    • Nexus Staging Maven plugin is incompatible with OpenJDK 17 DBZ-5025

    • OracleOffsetContextTest should be scoped to LogMiner only DBZ-5028

    • Scope several new Oracle tests to LogMiner only DBZ-5029

    Release 1.9.0.Final (April 5th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Ability to support all Redis connection schemes DBZ-4511

    • pass SINK config properties to OffsetStore and DatabaseHistory adapters DBZ-4864

    • Migrate test-suite fixtures to JUnit extension DBZ-4892

    • Use Jedis' clientSetname when establishing Redis connections DBZ-4911

    Fixes

    • MySQL connector fails to parse default integer value expressed as decimal DBZ-3541

    • Cannot use Secrets in Debezium server connector config DBZ-4742

    • spatial_ref_sys table should be excluded in Postgres connector DBZ-4814

    • Oracle: Parsing failed for SEL_LOB_LOCATOR sql: 'DECLARE DBZ-4862

    • Oracle connector stops calling logminer without any error message DBZ-4884

    • Single quotes replication DBZ-4891

    • Oracle keeps trying old scn even if it had no changes DBZ-4907

    • Redis Sink - using Transaction does not work in sharded Redis DBZ-4912

    • Oracle connector page has a typo since version 1.5. DBZ-4913

    • CVE-2022-26520 jdbc-postgresql: postgresql-jdbc: Arbitrary File Write Vulnerability [rhint-debezium-1] DBZ-4916

    • Kafka topics list throw exception DBZ-4920

    • Spelling mistake in doc about Oracle metrics DBZ-4926

    • MariaDB Trigger Parsing Error DBZ-4927

    • NPE during snapshotting MySQL database if custom converters present and column is null DBZ-4933

    • Avro converter requires Guava in lib directory DBZ-4935

    • Debezium Server 1.9 Fails to start up when transferring 1.8 offsets DBZ-4937

    • Missing images for 1.9.0.Beta1 and 1.9.0.CR1 releases DBZ-4943

    Other changes

    • Document "schema.include.list"/"schema.exclude.list" for SQL Server connector DBZ-2793

    • Align decimal.handling.mode documentation for Oracle like other connectors DBZ-3317

    • Use Red Hat Maven repo for custom build image in docs DBZ-4392

    • Upgrade postgres driver to version 42.3.3 DBZ-4919

    • Update Quality Outreach workflow to official Oracle Java GH action DBZ-4924

    • Bump jackson to 2.13.2 DBZ-4955

    Release 1.9.0.CR1 (March 25th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for Cassandra 4.x DBZ-2514

    • Exclude dummy events from database history DBZ-3762

• Define how MCS container images should be built DBZ-4006

    • Document kafka-connect-offset related properties DBZ-4014

• Update UI dependency and its configuration accordingly DBZ-4636

    • Save and load database history in Redis DBZ-4771

    • Provide the Federated module UI component for DBZ Connector edit Flow DBZ-4785

    • Switch to fabric8 model provided by Apicurio team DBZ-4790

    • Merge the Data and Runtime option page in federated component. DBZ-4804

    • Add task id and partition to the logging context for multi-partition connectors DBZ-4809

    • run.sh is not working in windows environment DBZ-4821

• Log when the tableId is null while filtering out some tables DBZ-4823

    • Debezium Mysql connector can’t handle CREATE INDEX IF NOT EXISTS (MariaDB) DBZ-4841

    • Postgresql connector prints uninformative log on snapshot phase DBZ-4861

    Fixes

    • SchemaNameAdjuster is too restrictive by default DBZ-3535

    • CVE-2022-21363 mysql-connector-java: Difficult to exploit vulnerability allows high privileged attacker with network access via multiple protocols to compromise MySQL Connectors [rhint-debezium-1] DBZ-4758

    • java.lang.NullPointerException while handling DROP column query DBZ-4786

    • Not reading the keystore/truststore when enabling MySQL SSL authentication DBZ-4787

    • "DebeziumException: Unable to find primary from MongoDB connection" post upgrade to 1.8.1 DBZ-4802

    • Oracle TO_DATE cannot be parsed when NLS parameter is provided DBZ-4810

    • Oracle test FlushStrategyIT fails DBZ-4819

    • Mysql: Getting ERROR Failed due to error: connect.errors.ConnectException: For input string: "false" DBZ-4822

    • Expect the null value with snapshot CapturedTables metric when skipping snapshotting DBZ-4824

    • MySQL 5.7 - no viable alternative at input 'ALTER TABLE ORD_ALLOCATION_CONFIG CHANGE RANK' DBZ-4833

• Missing notes on using Db2 connector DBZ-4835

    • ParsingException when adding a new table to an existing oracle connector DBZ-4836

    • Supplemental log check fails when restarting connector after table dropped DBZ-4842

    • CREATE_TOPIC docker image regression DBZ-4844

    • Logminer mining session stopped due to several kinds of SQL exceptions DBZ-4850

    • DDL statement couldn’t be parsed DBZ-4851

    • Gracefully pass unsupported column types from DDL parser as OracleTypes.OTHER DBZ-4852

    • Debezium oracle connector stopped because of Unsupported column type: LONG DBZ-4853

    • Compilation of SqlServerConnectorIntegrator fails DBZ-4856

    • Maven cannot compile debezium-microbenchmark-oracle DBZ-4860

• Oracle connector fails because of Supplemental logging not properly configured DBZ-4869

    • Re-read incremental snapshot chunk on DDL event DBZ-4878

• Oracle connector fails because of unsupported column type nclob DBZ-4880

    • Debezium throws CNFE for Avro converter DBZ-4885

    Other changes

    • OpenShift deployment instruction improvements DBZ-2594

    • Add Kubernetes version of deployment page DBZ-2646

    • Log DML replication events instead of throwing an error DBZ-3949

    • Review SqlServerConnector properties DBZ-4052

    • Promote Outbox Quarkus extension to stable DBZ-4430

    • Restructure Oracle connector documentation DBZ-4436

    • Downstream docs for outbox event routing SMTs DBZ-4652

    • Promote incremental snapshots to stable and GA DBZ-4655

    • Remove legacy --zookeeper option from example instructions DBZ-4660

    • Use JdbcConfiguration instead of Configuration for JDBC config values DBZ-4801

    • Don’t set truststore/keystore parameters to system variables DBZ-4832

    • Docs: JDBC driver should go to Oracle connector dir DBZ-4883

    Release 1.9.0.Beta1 (March 3rd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support Knative Eventing DBZ-2097

    • Provide UI option to view the configuration of the registered Debezium connector DBZ-3137

    • Handle out of order transaction start event DBZ-4287

    • Partition-scoped metrics for the SQL Server connector DBZ-4478

    • Save and load offsets in Redis DBZ-4509

    • Debezium Deploy Snapshots job is blocked for a long time DBZ-4628

    • Change DBZ UI Frontend to use new data_shape fields for Kafka message format DBZ-4714

    • Expect plain value instead of scientific exponential notation when using decimal string mode DBZ-4730

    Fixes

    • Long running transaction in Debezium 1.2.0 (PostgreSQL) DBZ-2306

    • "snapshot.include.collection.list" doesn’t work with the new MySQL connector implementation DBZ-3952

    • When running the NPM build I always end up with an updated/diverged package-lock.json DBZ-4622

    • Upgrade of Oracle connector causes NullPointerException DBZ-4635

    • Oracle-Connector fails parsing a DDL statement (external tables) DBZ-4641

    • oracle-connector DDL statement couldn’t be parsed DBZ-4662

    • Oracle parsing error for ALTER TABLE EXT_SIX LOCATION DBZ-4706

    • MySQL unparseable DDL - CREATE PROCEDURE DBZ-4707

    • Source timestamp timezone differs between snapshot and streaming records DBZ-4715

    • Document that Oracle Xstream emits DBMS_LOB method calls as separate events DBZ-4716

    • ORA-00308 raised due to offset SCN not being updated in a low traffic environment DBZ-4718

    • Property "log.mining.view.fetch.size" does not take effect DBZ-4723

• Postgres Debezium sends wrong value for column with default NULL::character varying in Kafka message DBZ-4736

    • Oracle Logminer: streaming start offset is off by one DBZ-4737

    • Apache Pulsar example doesn’t work DBZ-4739

    • Oracle dbname/signal with dots parsed incorrectly DBZ-4744

    • Oracle DDL statement couldn’t be parsed DBZ-4746

    • Overly verbose Debezium Server Redis logs DBZ-4751

    • DDL statement couldn’t be parsed DBZ-4752

    • Redis runs OOM log in wrong scenario DBZ-4760

    • Relax parsing of Heap and Index organized DDL clauses DBZ-4763

    • java.lang.NoSuchMethodError: org.apache.kafka.clients.admin.NewTopic DBZ-4773

    • Connection validation fails for Db2 DBZ-4777

    • Test suite unable to run due to jackson dependency overlaps DBZ-4781

    Other changes

    • Improve rendering of linked option names DBZ-4301

    • Oracle connector downstream docs for 1.9 DBZ-4325

    • Use images from quay.io in docs and examples DBZ-4440

    • Create an internal FAQ for Oracle Connector DBZ-4557

    • Improve documentation about max_replication_slots DBZ-4603

    • Connector doc formatting and link fixes DBZ-4606

    • Add a backend service for UI to fetch the connector configuration DBZ-4627

    • Update downstream Getting Started guide to describe revised deployment mechanism DBZ-4632

    • Update downstream OCP Installation guide to describe revised deployment mechanism DBZ-4633

• Change config for renovate bot to auto-merge only for non-major updates DBZ-4719

    • Incorrect connector version in Debezium RHEL Installation Guide DBZ-4721

    • Verify Debezium connector can be used with MongoDB Atlas DBZ-4731

    • Remove NATS example DBZ-4738

    • Upgrade to Quarkus 2.7.1.Final DBZ-4743

    • UI layout fixes DBZ-4748

    • Upgrade MySQL JDBC driver to 8.0.28 DBZ-4759

    • Nightly build artifacts not published DBZ-4766

    • Clarify need for link attributes in docs DBZ-4776

    Release 1.9.0.Alpha2 (February 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    For the incubating Debezium connector for Vitess, the mapping of BLOB and BINARY column types has changed from string to bytes (DBZ-4705).

    New features

• Use main repo workflow for CI/CD checks in the Debezium UI repository DBZ-3143

    • Build and deploy Debezium OpenAPI / JSON Schema definitions with every Debezium release DBZ-4394

    • Redis sink - Retry in case of connection error/OOM DBZ-4510

    • Make KAFKA_QUERY_TIMEOUT configurable DBZ-4518

    • MySQL history topic creation needs DESCRIBE_CONFIGS at the Cluster level DBZ-4547

    • Redis Sink - change records should be streamed in batches DBZ-4637

• Link for apicurio-registry-distro-connect-converter package is broken DBZ-4659

    • Extend Debezium Schema Generator DBZ-4665

    Fixes

    • Database.include.list results in tables being returned twice DBZ-3679

    • Suspected inconsistent documentation for 'Ad-hoc read-only Incremental snapshot' DBZ-4171

    • CVE-2021-2471 mysql-connector-java: unauthorized access to critical [rhint-debezium-1] DBZ-4283

    • Rhel preparation jenkins job pushes extra image DBZ-4296

    • Oracle Logminer: snapshot→stream switch misses DB changes in ongoing transactions DBZ-4367

• Incremental snapshots do not honor column case sensitivity DBZ-4584

    • JSON data corrupted in update events DBZ-4605

• Caused by: Multiple parsing errors; io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira DBZ-4609

    • Jenkins job for creating image snapshot does not update gitlab certificate correctly DBZ-4611

    • Update the UI README node and npm requirements DBZ-4630

    • Parse including keyword column table ddl error DBZ-4640

    • Nightly installation links do not use snapshot repository download links DBZ-4644

    • schema_only_recovery mode not working for FileDatabaseHistory DBZ-4646

    • SQL Server ad-hoc snapshot - SnapshotType is case sensitive DBZ-4648

    • DDL parsing issue: ALTER TABLE …​ MODIFY PARTITION …​ DBZ-4649

    • Mark incompatible Xstream tests as LogMiner only DBZ-4650

• DDL statement couldn’t be parsed: mismatched input '`encrypted`' DBZ-4661

    • debezium-examples fail when using confluentinc/cp-schema-registry:7.0.0 DBZ-4666

    • DDL parsing exception DBZ-4675

    • JdbcConnection#executeWithoutCommitting commits when auto-commit is enabled DBZ-4701

    • OracleSchemaMigrationIT fails with Xstream adapter DBZ-4703

    • Cannot expand JSON payload with nested arrays of objects DBZ-4704

    Other changes

    • Possible performance issue after Debezium 1.6.1 upgrade (from 1.5) DBZ-3872

    • Upgrade Jenkins and Introduce JCasC to jnovotny DBZ-3980

    • Random test failure - ZZZGtidSetIT#shouldProcessPurgedGtidSet DBZ-4294

    • Verify compatibility with Oracle 21c (21.3.0.0.0) DBZ-4305

    • Add metadata to OracleConnectorConfig for Debezium UI DBZ-4314

    • Release pipeline should check existence of GA version DBZ-4623

    • Release pipeline - conditionalize and parameterize backport check DBZ-4624

    • Migrating UI from webpack-dev-server v3 to v4 DBZ-4642

    • Don’t run checkstyle/dependency check on documentation-only pull requests or commits DBZ-4645

    • Cron-based Github Action to notify documentation changes in last x days DBZ-4653

    • Oracle DDL parser failure with supplemental log group clause with a custom name DBZ-4654

    • Build MCS container images for Debezium 1.9.0.Alpha1 and deploy to RHOAS quay container registry DBZ-4656

    • Upgrade postgres driver to version 42.3.2 DBZ-4658

    • Make sure right protoc version is applied DBZ-4668

    • Build trigger issues DBZ-4672

    • MongoUtilIT test failure - unable to connect to primary DBZ-4676

    • Upgrade to Quarkus 2.7.0.Final DBZ-4677

    • Update shared UG deployment file for use with downstream OCP Install Guide DBZ-4700

    • Indicate ROWID is not supported by XStream DBZ-4702

    Release 1.9.0.Alpha1 (January 26th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Support for the wal2json logical decoding plug-in, as used by the Debezium Postgres connector, has been deprecated. All users should move to the pgoutput or decoderbufs plug-ins. The wal2json plug-in is scheduled for removal in Debezium 2.0 (DBZ-3953).
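
For connectors still configured with wal2json, the migration is a configuration change. The following is a minimal sketch of a Debezium 1.9 Postgres connector registration that selects pgoutput via the plugin.name property; the connector name, host, credentials, and logical server name are placeholders rather than values from this document.

    # Minimal sketch: Postgres connector configuration using pgoutput instead of
    # the deprecated wal2json plug-in. All names and credentials are placeholders.
    import json

    postgres_connector = {
        "name": "inventory-pg-connector",  # placeholder connector name
        "config": {
            "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
            "plugin.name": "pgoutput",            # previously "wal2json"
            "database.hostname": "postgres",      # placeholder
            "database.port": "5432",
            "database.user": "debezium",          # placeholder
            "database.password": "dbz",           # placeholder
            "database.dbname": "inventory",       # placeholder
            "database.server.name": "dbserver1",  # placeholder logical server name
        },
    }

    # Ready to be POSTed to the Kafka Connect REST API as application/json.
    print(json.dumps(postgres_connector, indent=2))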

    There were changes to the format of the Infinispan cache to support the storage of certain large object (LOB) events. If you use the Infinispan buffer cache implementation and enabled LOB support, the cache files must be deleted and a new snapshot taken due to these compatibility changes (DBZ-4366).

    The Debezium container images for Apache Kafka and Kafka Connect contain the log4j 1.x library, which is a runtime dependency of Kafka (it is not used in any way by Debezium). Several vulnerabilities were recently reported against some classes contained in that library. While these classes are used by neither Kafka (Connect) nor Debezium, the class files org/apache/log4j/net/JMSSink.class, org/apache/log4j/jdbc/, and /org/apache/log4j/chainsaw/ have been removed from the log4j 1.x JAR shipped with these container images as a measure of caution. If you actually need these classes, you should obtain the original log4j 1.x JAR and add this via custom images you derive from the Debezium ones. We advise against doing this though as per aforementioned vulnerabilities (DBZ-4568).

    New features

• Debezium MySQL connector encounters latency with large MySQL DML DBZ-3477

• Add create/update/delete events-seen metrics for monitoring upstream DML operations DBZ-4351

    • Allow additional config options for Debezium Server Pubsub Connector DBZ-4375

    • Allow adhoc snapshots using signals in Oracle versions prior to 12c DBZ-4404

    • Fail MongoDB start when oplog is used for MongoDB 5+ DBZ-4415

    • Deprecated TruncateHandlingMode config property in favor of skipped_operations DBZ-4419

    • Introduce interfaces and default implementations for change event source metrics DBZ-4459

    • Create a Debezium schema generator for Debezium connectors (follow-up work) DBZ-4460

• Make connector task partitions readable in logs DBZ-4472

    • Remove unused brackets in MySqlParser DBZ-4473

    • Document DB permissions for Oracle Connector DBZ-4494

    • Add support for extra gRPC headers in Vitess connector DBZ-4532

    • Mining session stopped due to 'No more data to read from socket' DBZ-4536

    • A failure to register JMX metrics should fail the connector DBZ-4541

    • Debezium Engine should use topic names for conversion DBZ-4566

    • Allow user to define custom retriable message DBZ-4577

    • Implement Renovate to fix legacy-peer-deps issue with npm DBZ-4585

    • Typo in connect README DBZ-4589

    • Unsupported column type 'ROWID' error DBZ-4595

    • Cleanup project management in testsuite job DBZ-4602

    Fixes

    • NPE on PostgreSQL Domain Array DBZ-3657

    • MysqlSourceConnector issue with latin1 tables DBZ-3700

    • JSON Payload not expanding when enabling it DBZ-4457

    • Kafka Connect REST extension cannot be built with 1.9 DBZ-4465

    • DDL statement couldn’t be parsed DBZ-4485

• Parsing multiple signed/unsigned keywords from a DDL statement failed DBZ-4497

    • Set the correct binlog serverId & threadId DBZ-4500

    • Null out query in read-only incremental snapshot DBZ-4501

• R/O incremental snapshot can block the binlog stream on restart DBZ-4502

• Dropping the primary key column results in an exception DBZ-4503

    • [MySQL Debezium] DDL Parsing error - curdate() & cast() DBZ-4504

    • Extra file checker-qual in PostgreSQL package DBZ-4507

    • website-builder image is not buildable DBZ-4508

    • Job for creating gold image not reading credentials correctly DBZ-4516

    • Replication stream retries are not configured correctly DBZ-4517

    • Add backend errors among retriable for Postgres connector DBZ-4520

    • Infinispan doesn’t work with underscores inside cache names DBZ-4526

    • Connector list should update immediately when a connector is deleted DBZ-4538

• Mongo filters page shows nulls in namespace name DBZ-4540

    • LogMinerHelperIT fails when running Oracle CI with a fresh database DBZ-4542

    • Oracle-Connector fails parsing a DDL statement (VIRTUAL keyword) DBZ-4546

• DatabaseVersionResolver comparison logic skips tests unintentionally DBZ-4548

    • io.debezium.text.ParsingException when column name is 'seq' DBZ-4553

    • MySQL FLUSH TABLE[S] with empty table list not handled DBZ-4561

    • Debezium apicurio version is not aligned with Quarkus DBZ-4565

    • Oracle built-in schema exclusions should also apply to DDL changes DBZ-4567

    • mongo-source-connector config database.include.list does not work DBZ-4575

    • Can’t process column definition with length exceeding Integer.MAX_VALUE DBZ-4583

    • Oracle connector can’t find the SCN DBZ-4597

    Other changes

    • Set up CI for Oracle DBZ-732

    • Migrate logger used for tests to Logback DBZ-2224

    • Update downstream docs in regards to deprecated elements DBZ-3881

    • Broken links to the Transaction metadata topics from descriptions for provide.transaction.metadata property DBZ-3997

    • Add script to check for missing backports DBZ-4063

    • Protect release from using invalid version name DBZ-4072

    • Upgrade to Quarkus 2.6.2.Final DBZ-4117

    • Use Postgres 10 by default DBZ-4131

    • Give debezium-builder user privileges to access internal issues DBZ-4271

    • Point to supported versions in connector pages DBZ-4300

    • Allow for additional custom columns in an outbox table DBZ-4317

    • Log problematic values if they cannot be processed DBZ-4371

    • Run Jenkins CI on weekends too DBZ-4373

    • Update Postgres JDBC driver to 42.3.1 DBZ-4374

    • Release pipeline should use Jira API token DBZ-4383

    • Remove log.mining.log.file.query.max.retries configuration property DBZ-4408

    • Add Debezium Server example using Postgres and Pub/Sub DBZ-4438

    • Document Outbox SMT behaviour with postgres bytea_output = escape DBZ-4461

    • Run formatting check in the same connector/module workflows DBZ-4462

    • Upgrade SQL Server driver to 9.4 DBZ-4463

    • Add snapshot repository to Vitess connector DBZ-4464

    • REST extension tests must not depend on source code version DBZ-4466

    • snapshotPreceededBySchemaChange should not be tested for Db2 DBZ-4467

    • Debezium Server workflow should build PG connector without tests DBZ-4468

    • PostgresShutdownIT must not depend on Postgres version DBZ-4469

    • Updating jenkins job creating image snapshots DBZ-4486

    • Set jenkins jobs to store last 10 builds DBZ-4506

    • Provide a script to generate release notes section DBZ-4513

    • Remove INTERNAL_KEY_CONVERTER and INTERNAL_VALUE_CONVERTER env vars DBZ-4514

    • Bump protobuf version to the latest 3.x DBZ-4527

    • Document automatic log-switch setting for low-frequency change systems DBZ-4528

    • Organize properties of Db2 connector DBZ-4537

    • Update release procedure to cover required documentation config changes DBZ-4539

    • Module debezium-testing-testcontainers tests are not executed DBZ-4544

    • Check Debezium user logging after auth change DBZ-4545

    • Fix links to connector incremental snapshots topic DBZ-4552

    • Vitess connector image cannot be built DBZ-4559

    • Reduce GitHub action build times with formatting DBZ-4562

    • Doc updates to address downstream build issues DBZ-4563

    • Upgrade Avro converter to 7.0.1 and Apicurio to 2.1.5.Final DBZ-4569

    • Older degree of parallelism DDL syntax causes parsing exception DBZ-4571

    • Conditionalize note about outbox event router incompatibility DBZ-4573

    • Update description of snapshot.mode in postgresql.adoc DBZ-4574

    • Avoid build warning about maven-filtering missing plugin descriptor DBZ-4580

• Fix build failure when xstream is missing while building the micro benchmark for Oracle DBZ-4581

    • Update shared UG deployment file to clarify that connectors can use existing KC instance DBZ-4582

    • Test Failure - RecordsStreamProducerIT DBZ-4592

    • Upgrade Kafka to 3.1.0 DBZ-4610

    • Server transformation properties should refer to "type" rather than "class" DBZ-4613


    Release Notes for Debezium 1.9

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 1.9.8.Final (December 15th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.8.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.8.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.8.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Postgres existing publication is not updated with the new table DBZ-3921

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Restart SQL Server task on "Cannot continue the execution because the session is in the kill state" exception DBZ-5777

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Support logical decoding from Postgres 16 stand-bys DBZ-7181

    Fixes

    • MongoConnector’s field exclusion configuration does not work with fields with the same name but from different collections DBZ-4846

    • ORA-01003: no statement parsed DBZ-5352

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • No table filters found for filtered publication DBZ-5949

    Other changes

    • Review tutorial README for configuring Debezium to use Avro serialization on Red Hat OpenShift Service Registry DBZ-4616

    Release 1.9.7.Final (October 25th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.7.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.7.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.7.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • MySqlErrorHandler should handle SocketException DBZ-5486

    Fixes

    • ORA-01289: cannot add duplicate logfile DBZ-5276

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • Missing snapshot pending transactions DBZ-5482

    • Outbox pattern nested payload leads to connector crash DBZ-5654

    • Keyword virtual can be used as an identifier DBZ-5674

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    • Function DATE_ADD can be used as an identifier DBZ-5679

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

• MySqlConnector fails to parse create view statement DBZ-5708

    • Debezium Server 1.9.6 is using MSSQL JDBC 7.2.2 instead of 9.4.1 DBZ-5711

    • Vitess: Handle Vstream error: unexpected server EOF DBZ-5722

    • ParsingException: DDL statement couldn’t be parsed (index hints) DBZ-5724

    • Oracle SQL parsing error when collation used DBZ-5726

    • Unparseable DDL statement DBZ-5734

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    Other changes

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Remove downstream TP designation for RAC content in Oracle connector docs DBZ-5735

    • Update Pulsar client to 2.10.1 DBZ-5737

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    Release 1.9.6.Final (September 23rd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.6.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.6.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.6.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Read Debezium Metrics From Debezium Server Consumer DBZ-5235

    • Treat SQLServerException with "Broken pipe (Write failed)" exception message as a retriable exception DBZ-5292

    • Add INITIAL_ONLY to Db2 snapshot mode DBZ-5429

    • Unsupported non-relational tables should be gracefully skipped by the connector during streaming DBZ-5441

    • Restart SQL Server task on "Socket closed" exception DBZ-5478

    • Handle Vstream Connection reset DBZ-5551

    • Improve documentation editing experience by setting attributes for the preview DBZ-5576

    • Traditional snapshot process setting source.ts_ms DBZ-5591

• Support for setting stats_sample_pages=default in alter table statements DBZ-5631

• Support for using any expression in KILL statements DBZ-5636

    Fixes

• System test-suite instabilities in CI DBZ-3862

    • Source info of incremental snapshot events exports wrong data DBZ-4329

    • Redis Sink config properties are not passed to DB history DBZ-5035

    • Memory leak in EventDeserializer caused by tableMapEventByTableId DBZ-5126

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • HTTP sink not retrying failing requests DBZ-5307

    • Caused by: java.io.EOFException: Failed to read next byte from position 2005308603 DBZ-5333

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • Debezium Cassandra 4 Connector not working with 1.9.4 release BUT works with 1.9.2 release DBZ-5380

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Missing "previousId" property with parsing the rename statement in kafka history topic DBZ-5386

    • Check constraint introduces a column based on constraint in the schema change event. DBZ-5390

    • Clarify which database name to use for signal.data.collection when using Oracle with pluggable database support DBZ-5399

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • Upgrade to Kafka 3.1 broke build compatibility with Kafka 2.x and Kafka 3.0 DBZ-5404

    • PostgresConnectorIT#shouldRecoverFromRetriableException fails randomly DBZ-5408

    • OracleConnectorIT waitForCurrentScnToHaveBeenSeenByConnector method can produce a NumberFormatException DBZ-5428

    • OffsetStore not stopped if it fails to fully start DBZ-5433

    • Translation from mongodb document to kafka connect schema fails when nested arrays contain no elements DBZ-5434

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Typo in sqlserver document. DBZ-5440

    • Typo in postgresql document. DBZ-5450

    • Create Index DDL fails to parse when using TABLESPACE clause with quoted identifier DBZ-5472

• Outbox doesn’t check array consistency properly when it determines its schema DBZ-5475

    • Misleading statistics written to the log DBZ-5476

• Debezium connector task didn’t retry on failover in MongoDB 5 DBZ-5479

    • Oracle DATADUMP DDL cannot be parsed DBZ-5488

• MySQL connector fails to parse DDL statements that include the keyword "buckets" DBZ-5499

    • duplicate call to config.validateAndRecord() in RedisDatabaseHistory DBZ-5506

    • DDL statement couldn’t be parsed : mismatched input 'ENGINE' DBZ-5508

    • LogMiner DML parser incorrectly interprets concatenation operator inside quoted column value DBZ-5521

    • Mysql Connector DDL Parser does not parse all privileges DBZ-5522

    • SQL Server random test failures - EventProcessingFailureHandlingIT DBZ-5525

    • CREATE TABLE with JSON-based CHECK constraint clause causes MultipleParsingExceptions DBZ-5526

    • SQL Server test failure - verifyOffsets DBZ-5527

    • Support EMPTY column identifier DBZ-5550

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • max.queue.size.in.bytes is invalid DBZ-5569

• Vitess: Handle VStream close unexpectedly DBZ-5579

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Field validation errors are misleading for positive, non-zero expectations DBZ-5588

    • LIST_VALUE_CLAUSE not allowing TIMESTAMP LITERAL DBZ-5592

• Oracle DDL does not support comments on materialized views DBZ-5595

    • Message with LSN foo larger than expected LSN bar DBZ-5597

    • Oracle DDL does not support DEFAULT ON NULL DBZ-5605

    • Datatype mdsys.sdo_geometry not supported DBZ-5609

• MySQL connector cannot parse default value of decimal column enclosed in double quotes DBZ-5630

    • Continuously WARNs about undo transactions when LOB is enabled DBZ-5635

    • Literal "${project.version}" in the source record instead of the actual version DBZ-5640

    • TABLE_TYPE keyword can be used as identifier DBZ-5643

    • Large numbers of ROLLBACK transactions can lead to memory leak when LOB is not enabled. DBZ-5645

    Other changes

    • Clean-up unused documentation variables DBZ-2595

    • Oracle SCAN VIP support DBZ-3987

    • Intermittent test failures on CI: EventProcessingFailureHandlingIT DBZ-4004

    • Downstream test for outbox event routing SMT DBZ-4266

    • No documentation for snapshot.include.collection.list property for Db2 connector DBZ-4345

    • Improve Filter SMT documentation / examples DBZ-4417

    • Update instructions for deploying Debezium on RHEL (downstream-only change) DBZ-5293

    • Build stable branches for connector-specific repos DBZ-5409

    • Address User guide review comments for Oracle connector DBZ-5418

    • OracleSchemaMigrationIT fails on non-pluggable (non-CDB) databases DBZ-5419

    • Update link format in shared tutorial file DBZ-5422

    • Remove community conditionalization in signaling doc for Oracle incremental and ad hoc snapshots content DBZ-5458

    • MySQL read.only property incorrectly appears in downstream documentation DBZ-5555

    • Upgrade binary log client to 0.27.2 DBZ-5620

    Release 1.9.5.Final (July 8th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Correct documentation of Event Router in expand.json mode DBZ-5296

• Allow Maven versions greater than the current 3.8.4 DBZ-5299

    Fixes

    • Data duplication problem using postgresql source on debezium server DBZ-5070

• Duplicate SCNs on Oracle RAC installations incorrectly processed DBZ-5245

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

• Debezium server fails when connecting to Azure Event Hubs DBZ-5279

    • ORA-01086 savepoint never established raised when database history topic cannot be created or does not exist DBZ-5281

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    • Snapshot fails when table’s relational model is created using an abstract data type as unique index DBZ-5300

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    Other changes

    • Document use of JAR artifact to build Debezium scripting SMT into Kafka Connect DBZ-5227

    • Build Oracle connector by default without Maven profiles DBZ-5234

    • Remove reference to removed case insensitive option in Oracle README.md DBZ-5250

    • LogMinerHelperIT tests fail when executed against a multi-node Oracle RAC cluster DBZ-5301

    • Support skipping tests based on whether V$OPTION is enabled or disabled DBZ-5303

    • Upgrade to Apache Kafka 3.2.0 DBZ-5346

• Oracle GitHub actions workflow no longer runs tests on pushes DBZ-5349

    Release 1.9.4.Final (June 21st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Include event scn in Oracle records DBZ-5225

    • Redis Store does not work with GCP Managed Redis DBZ-5268

    Fixes

    • Incorrect loading of LSN from offsets DBZ-3942

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • BigDecimal has mismatching scale value for given Decimal schema DBZ-4890

    • Debezium has never found starting LSN DBZ-5031

    • Cursor fetch is used for all results during connection DBZ-5084

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5200

• Debezium does NOT support "unix_timestamp()" as DEFAULT value DBZ-5201

    • Oracle io.debezium.DebeziumException: io.debezium.DebeziumException: Failed to get transaction id for current SCN DBZ-5202

    • Debezium Postgres v1.9.3 fails in Materialize CI DBZ-5204

    • Oracle Connector failing due to ALTER TABLE for adding column with foreign key DBZ-5210

    • DDL statement couldn’t be parsed - Oracle connector 1.9.3.Final DBZ-5211

    • DDL statement couldn’t be parsed 2 - Oracle connector 1.9.3.Final DBZ-5230

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle unparsable ddl create table DBZ-5237

    • Character set influencers are not properly parsed on default values DBZ-5241

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5271

    • Deadlock during snapshot with Mongo connector DBZ-5272

• MySQL parser is not able to handle variables in KILL command DBZ-5273

    Other changes

    • Confusing example for schema change topic DBZ-4713

    • Update cache-invalidation example DBZ-4754

    • MBean name registrations no longer correct in documentation DBZ-5153

    • Use ubi9 as the base image for Debezium UI DBZ-5199

    • Restore deleted topic heading in mongodb-outbox-event-router.adoc DBZ-5219

    • Create shared adoc fragments for specifying MBean name format in connector metrics sections DBZ-5233

    • Several Oracle tests do not get database name from TestHelper DBZ-5258

    Release 1.9.3.Final (June 2nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Allow mongodb-connector to decode Binary payloads DBZ-4600

    • ORA-04030: out of process memory when trying to allocate 65568 bytes (Logminer LCR c,krvxrib:buffer) DBZ-4963

• Include heartbeat table in the heartbeat process in the Debezium Oracle Connector DBZ-5119

    • Avoid reading entire schema history file into memory in the test suite DBZ-5129

• Expose more useful exception info when building the field default value schema DBZ-5172

    Fixes

    • Error and connector stops when DDL contains lateral DBZ-4780

    • Schema changes should flush SCN to offsets if there are no other active transactions DBZ-4782

    • Connector stops streaming after a re-balance DBZ-4792

• MySQL connector incremental snapshot failed to parse datetime column length when connector sets "snapshot.fetch.size": 20000 DBZ-4939

    • [MySQL Debezium] DDL Parsing error - CREATE OR REPLACE TABLE DBZ-4958

    • InstanceAlreadyExistsException during MongoDb connector metrics registration DBZ-5011

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • Fix inconsistent transaction id when handling transactional messages in Vitess connector DBZ-5063

    • Debezium MCS Error when changing Postgres port DBZ-5067

    • 4 Connections per connector (postgres) DBZ-5074

    • Oracle documentation refers to archive_log_target rather than archive_lag_target DBZ-5076

    • 'ALTER TABLE mytable DROP FOREIGN KEY IF EXISTS mytable_fk' no viable alternative at input 'ALTER TABLE mytable DROP FOREIGN KEY IF' DBZ-5077

    • Oracle Logminer: records missed during switch from snapshot to streaming mode DBZ-5085

    • Interrupting a snapshot process can hang for some JDBC drivers DBZ-5087

    • Debezium fails to undo change event due to transaction id ending in ffffffff with LogMiner DBZ-5090

• Postgresql connector does not retry on some errors when Postgres is taken offline DBZ-5097

    • Parsing zero day fails DBZ-5099

    • Cannot Set debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm to empty value DBZ-5105

    • Debezium connector failed with create table statement DBZ-5108

    • Current version of surefire/failsafe skips tests on failure in BeforeAll DBZ-5112

    • Test IncrementalSnapshotIT##schemaChanges fails randomly DBZ-5131

    • Cannot parse default value 0.000000000000000000 for bigint column DBZ-5134

    • MilliSecondsBehindSource is not reported by SQL Server connector DBZ-5137

    • Restarting mysql connector task fails with: java.lang.RuntimeException: Unable to register the MBean DBZ-5138

    • No raising of "WARN Event for transaction X has already been processed, skipped." DBZ-5140

    • Oracle connector restarts after ORA-01291 DBZ-5148

    • TestContainers method DebeziumContainer#getConnectorTaskState can raise a NullPointerException DBZ-5159

    • ExtractNewRecordState SMT Replaces Null Value with Column’s Default Value DBZ-5166

    • Oracle connector metrics tracking of rollback and abandoned transactions may cause high memory usage DBZ-5179

    • Debezium official documentation typo DBZ-5040

• Should sleep for REGISTRATION_RETRY_DELAY when registering the MBean fails DBZ-5141

    Other changes

    • Restructure documentation for custom converters DBZ-4588

    • Document xmin.fetch.interval.ms property for Postgres connector DBZ-4734

    • Add FAQ about ORA-01882 and Oracle 11 to documentation DBZ-5057

    • Rename "Mysql" to "MySql" in related MysqlFieldReader interface DBZ-5078

    • Remove auto-generation and default values for MySQL database.server.id DBZ-5101

    • Upgrade Jackson Databind to 2.13.2.2 DBZ-5107

    • Switch to released version of Fixture5 extension in System testsuite DBZ-5114

    • Use range to activate jdk11 profile DBZ-5118

    • Misc edits to prepare Oracle connector docs for GA DBZ-5132

    • Pro-actively detect issues with LogMiner records DBZ-5147

    • Align Postgresql driver with Quarkus DBZ-5060

    Release 1.9.2.Final (April 29th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • adjust LogMiner batch size based on comparison with currently used batch size DBZ-5005

    Fixes

    • Connector throws java.lang.ArrayIndexOutOfBoundsException DBZ-3848

• Document that no relevant tables should be in the SYS or SYSTEM tablespaces DBZ-4762

• Unable to mine Oracle source tables which have "/" in table names DBZ-5006

    • SQL Server in multi-partition mode fails if a new database is added to an existing configuration DBZ-5033

    • Debezium Server tarball 1.9.1 does not work DBZ-5037

• MySQL tests start before the MySQL DB container is running DBZ-5054

    • Debezium server configuration properties not rendered correctly DBZ-5058

    Other changes

    There are no other changes in this release.

    Release 1.9.1.Final (April 21st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Extract component preparation from test-suite job DBZ-4601

• Making Postgres "PSQLException: This connection has been closed." retriable DBZ-4948

    Fixes

    • Simplify and clean up system testsuite job DBZ-4570

    • Getting java.sql.SQLException: ORA-01291: missing logfile while running with archive log only DBZ-4879

    • Debezium uses wrong LCR format for Oracle 12.1 DBZ-4932

    • Oracle duplicates on connector restart DBZ-4936

    • Oracle truncate causes exception DBZ-4953

    • NPE caused by io.debezium.connector.oracle.antlr.listener.ColumnDefinitionParserListener.resolveColumnDataType DBZ-4976

    • Oracle connector may throw NullPointerException when stopped after an unsuccessful startup DBZ-4978

    • NPE for non-table related DDLs DBZ-4979

    • CTE statements aren’t parsed by MySQL connector DBZ-4980

    • Unsupported MySQL Charsets during Snapshotting for fields with custom converter DBZ-4983

    • Outbox Transform does not allow expanded payload with additional fields in the envelope DBZ-4989

    • Redis Sink - clientSetname is taking place before auth DBZ-4993

    • CLOB with single quotes causes parser exception DBZ-4994

    • Oracle DDL parser fails on references_clause with no column list DBZ-4996

    • Can’t use 'local' database through mongos DBZ-5003

    • Triggering Incremental Snapshot on MongoDB connector throws json parsing error DBZ-5015

    • Redis Sink - Check if client is not null before closing it DBZ-5019

    Other changes

    • QE jenkins jobs consolidation DBZ-4235

    • Create trigger job for connector jobs DBZ-4558

    • Debezium UI dependency updates DBZ-4881

    • Read-only incremental snapshots blog post DBZ-4917

    • Update Pulsar client version used by Debezium Server DBZ-4961

    • Intermittent failure of RedisStreamIT.testRedisConnectionRetry DBZ-4966

    • Debezium raised an exception and the task was still running DBZ-4987

    • Nexus Staging Maven plugin is incompatible with OpenJDK 17 DBZ-5025

    • OracleOffsetContextTest should be scoped to LogMiner only DBZ-5028

    • Scope several new Oracle tests to LogMiner only DBZ-5029

    Release 1.9.0.Final (April 5th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Ability to support all Redis connection schemes DBZ-4511

    • pass SINK config properties to OffsetStore and DatabaseHistory adapters DBZ-4864

    • Migrate test-suite fixtures to JUnit extension DBZ-4892

    • Use Jedis' clientSetname when establishing Redis connections DBZ-4911

    Fixes

    • MySQL connector fails to parse default integer value expressed as decimal DBZ-3541

    • Cannot use Secrets in Debezium server connector config DBZ-4742

    • spatial_ref_sys table should be excluded in Postgres connector DBZ-4814

    • Oracle: Parsing failed for SEL_LOB_LOCATOR sql: 'DECLARE DBZ-4862

    • Oracle connector stops calling logminer without any error message DBZ-4884

    • Single quotes replication DBZ-4891

    • Oracle keeps trying old scn even if it had no changes DBZ-4907

    • Redis Sink - using Transaction does not work in sharded Redis DBZ-4912

    • Oracle connector page have typo since version 1.5. DBZ-4913

    • CVE-2022-26520 jdbc-postgresql: postgresql-jdbc: Arbitrary File Write Vulnerability [rhint-debezium-1] DBZ-4916

    • Kafka topics list throw exception DBZ-4920

    • Spelling mistake in doc about Oracle metrics DBZ-4926

    • MariaDB Trigger Parsing Error DBZ-4927

    • NPE during snapshotting MySQL database if custom converters present and column is null DBZ-4933

    • Avro converter requires Guava in lib directory DBZ-4935

    • Debezium Server 1.9 Fails to start up when transferring 1.8 offsets DBZ-4937

    • Missing images for 1.9.0.Beta1 and 1.9.0.CR1 releases DBZ-4943

    Other changes

    • Document "schema.include.list"/"schema.exclude.list" for SQL Server connector DBZ-2793

    • Align decimal.handling.mode documentation for Oracle like other connectors DBZ-3317

    • Use Red Hat Maven repo for custom build image in docs DBZ-4392

    • Upgrade postgres driver to version 42.3.3 DBZ-4919

    • Update Quality Outreach workflow to official Oracle Java GH action DBZ-4924

    • Bump jackson to 2.13.2 DBZ-4955

    Release 1.9.0.CR1 (March 25th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for Cassandra 4.x DBZ-2514

    • Exclude dummy events from database history DBZ-3762

    • Define how MCS container images should be built DBZ-4006

    • Document kafka-connect-offset related properties DBZ-4014

    • Update UI dependency and its configuration accordingly DBZ-4636

    • Save and load database history in Redis DBZ-4771

    • Provide the Federated module UI component for DBZ Connector edit Flow DBZ-4785

    • Switch to fabric8 model provided by Apicurio team DBZ-4790

    • Merge the Data and Runtime option page in federated component. DBZ-4804

    • Add task id and partition to the logging context for multi-partition connectors DBZ-4809

    • run.sh is not working in windows environment DBZ-4821

    • Log the tableId is null when filter out some tables DBZ-4823

    • Debezium Mysql connector can’t handle CREATE INDEX IF NOT EXISTS (MariaDB) DBZ-4841

    • Postgresql connector prints uninformative log on snapshot phase DBZ-4861

    Fixes

    • SchemaNameAdjuster is too restrictive by default DBZ-3535

    • CVE-2022-21363 mysql-connector-java: Difficult to exploit vulnerability allows high privileged attacker with network access via multiple protocols to compromise MySQL Connectors [rhint-debezium-1] DBZ-4758

    • java.lang.NullPointerException while handling DROP column query DBZ-4786

    • Not reading the keystore/truststore when enabling MySQL SSL authentication DBZ-4787

    • "DebeziumException: Unable to find primary from MongoDB connection" post upgrade to 1.8.1 DBZ-4802

    • Oracle TO_DATE cannot be parsed when NLS parameter is provided DBZ-4810

    • Oracle test FlushStrategyIT fails DBZ-4819

    • Mysql: Getting ERROR Failed due to error: connect.errors.ConnectException: For input string: "false" DBZ-4822

    • Expect the null value with snapshot CapturedTables metric when skipping snapshotting DBZ-4824

    • MySQL 5.7 - no viable alternative at input 'ALTER TABLE ORD_ALLOCATION_CONFIG CHANGE RANK' DBZ-4833

    • missing notes on using db2 connector DBZ-4835

    • ParsingException when adding a new table to an existing oracle connector DBZ-4836

    • Supplemental log check fails when restarting connector after table dropped DBZ-4842

    • CREATE_TOPIC docker image regression DBZ-4844

    • Logminer mining session stopped due to several kinds of SQL exceptions DBZ-4850

    • DDL statement couldn’t be parsed DBZ-4851

    • Gracefully pass unsupported column types from DDL parser as OracleTypes.OTHER DBZ-4852

    • Debezium oracle connector stopped because of Unsupported column type: LONG DBZ-4853

    • Compilation of SqlServerConnectorIntegrator fails DBZ-4856

    • Maven cannot compile debezium-microbenchmark-oracle DBZ-4860

    • oracle connector fails because of Supplemental logging not properly configured DBZ-4869

    • Re-read incremental snapshot chunk on DDL event DBZ-4878

    • oracle connector fails because of unsupported column type nclob DBZ-4880

    • Debezium throws CNFE for Avro converter DBZ-4885

    Other changes

    • OpenShift deployment instruction improvements DBZ-2594

    • Add Kubernetes version of deployment page DBZ-2646

    • Log DML replication events instead of throwing an error DBZ-3949

    • Review SqlServerConnector properties DBZ-4052

    • Promote Outbox Quarkus extension to stable DBZ-4430

    • Restructure Oracle connector documentation DBZ-4436

    • Downstream docs for outbox event routing SMTs DBZ-4652

    • Promote incremental snapshots to stable and GA DBZ-4655

    • Remove legacy --zookeeper option from example instructions DBZ-4660

    • Use JdbcConfiguration instead of Configuration for JDBC config values DBZ-4801

    • Don’t set truststore/keystore parameters to system variables DBZ-4832

    • Docs: JDBC driver should go to Oracle connector dir DBZ-4883

    Release 1.9.0.Beta1 (March 3rd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support Knative Eventing DBZ-2097

    • Provide UI option to view the configuration of the registered Debezium connector DBZ-3137

    • Handle out of order transaction start event DBZ-4287

    • Partition-scoped metrics for the SQL Server connector DBZ-4478

    • Save and load offsets in Redis DBZ-4509

    • Debezium Deploy Snapshots job is blocked for a long time DBZ-4628

    • Change DBZ UI Frontend to use new data_shape fields for Kafka message format DBZ-4714

    • Expect plain value instead of scientific exponential notation when using decimal string mode DBZ-4730

    Fixes

    • Long running transaction in Debezium 1.2.0 (PostgreSQL) DBZ-2306

    • "snapshot.include.collection.list" doesn’t work with the new MySQL connector implementation DBZ-3952

    • When running the NPM build I always end up with an updated/diverged package-lock.json DBZ-4622

    • Upgrade of Oracle connector causes NullPointerException DBZ-4635

    • Oracle-Connector fails parsing a DDL statement (external tables) DBZ-4641

    • oracle-connector DDL statement couldn’t be parsed DBZ-4662

    • Oracle parsing error for ALTER TABLE EXT_SIX LOCATION DBZ-4706

    • MySQL unparseable DDL - CREATE PROCEDURE DBZ-4707

    • Source timestamp timezone differs between snapshot and streaming records DBZ-4715

    • Document that Oracle Xstream emits DBMS_LOB method calls as separate events DBZ-4716

    • ORA-00308 raised due to offset SCN not being updated in a low traffic environment DBZ-4718

    • Property "log.mining.view.fetch.size" does not take effect DBZ-4723

    • Postgres debezium sends the wrong value for a column with default NULL::character varying in the Kafka message DBZ-4736

    • Oracle Logminer: streaming start offset is off by one DBZ-4737

    • Apache Pulsar example doesn’t work DBZ-4739

    • Oracle dbname/signal with dots parsed incorrectly DBZ-4744

    • Oracle DDL statement couldn’t be parsed DBZ-4746

    • Overly verbose Debezium Server Redis logs DBZ-4751

    • DDL statement couldn’t be parsed DBZ-4752

    • Redis runs OOM log in wrong scenario DBZ-4760

    • Relax parsing of Heap and Index organized DDL clauses DBZ-4763

    • java.lang.NoSuchMethodError: org.apache.kafka.clients.admin.NewTopic DBZ-4773

    • Connection validation fails for Db2 DBZ-4777

    • Test suite unable to run due to jackson dependency overlaps DBZ-4781

    Other changes

    • Improve rendering of linked option names DBZ-4301

    • Oracle connector downstream docs for 1.9 DBZ-4325

    • Use images from quay.io in docs and examples DBZ-4440

    • Create an internal FAQ for Oracle Connector DBZ-4557

    • Improve documentation about max_replication_slots DBZ-4603

    • Connector doc formatting and link fixes DBZ-4606

    • Add a backend service for UI to fetch the connector configuration DBZ-4627

    • Update downstream Getting Started guide to describe revised deployment mechanism DBZ-4632

    • Update downstream OCP Installation guide to describe revised deployment mechanism DBZ-4633

    • Changes config for renovate bot to auto-merge only for non-major update DBZ-4719

    • Incorrect connector version in Debezium RHEL Installation Guide DBZ-4721

    • Verify Debezium connector can be used with MongoDB Atlas DBZ-4731

    • Remove NATS example DBZ-4738

    • Upgrade to Quarkus 2.7.1.Final DBZ-4743

    • UI layout fixes DBZ-4748

    • Upgrade MySQL JDBC driver to 8.0.28 DBZ-4759

    • Nightly build artifacts not published DBZ-4766

    • Clarify need for link attributes in docs DBZ-4776

    Release 1.9.0.Alpha2 (February 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    For the incubating Debezium connector for Vitess, the mapping of BLOB and BINARY column types has changed from string to bytes (DBZ-4705).

    New features

    • Use main repo workflow for CI/CD checks in Debezium UI repository checks DBZ-3143

    • Build and deploy Debezium OpenAPI / JSON Schema definitions with every Debezium release DBZ-4394

    • Redis sink - Retry in case of connection error/OOM DBZ-4510

    • Make KAFKA_QUERY_TIMEOUT configurable DBZ-4518

    • MySQL history topic creation needs DESCRIBE_CONFIGS at the Cluster level DBZ-4547

    • Redis Sink - change records should be streamed in batches DBZ-4637

    • Link for apicurio-registry-distro-connect-converter package is broken DBZ-4659

    • Extend Debezium Schema Generator DBZ-4665

    Fixes

    • Database.include.list results in tables being returned twice DBZ-3679

    • Suspected inconsistent documentation for 'Ad-hoc read-only Incremental snapshot' DBZ-4171

    • CVE-2021-2471 mysql-connector-java: unauthorized access to critical [rhint-debezium-1] DBZ-4283

    • Rhel preparation jenkins job pushes extra image DBZ-4296

    • Oracle Logminer: snapshot→stream switch misses DB changes in ongoing transactions DBZ-4367

    • Incremental snapshots does not honor column case sensitivity DBZ-4584

    • JSON data corrupted in update events DBZ-4605

    • Caused by: Multiple parsing errors: io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira DBZ-4609

    • Jenkins job for creating image snapshot does not update gitlab certificate correctly DBZ-4611

    • Update the UI README node and npm requirements DBZ-4630

    • Parse including keyword column table ddl error DBZ-4640

    • Nightly installation links do not use snapshot repository download links DBZ-4644

    • schema_only_recovery mode not working for FileDatabaseHistory DBZ-4646

    • SQL Server ad-hoc snapshot - SnapshotType is case sensitive DBZ-4648

    • DDL parsing issue: ALTER TABLE …​ MODIFY PARTITION …​ DBZ-4649

    • Mark incompatible Xstream tests as LogMiner only DBZ-4650

    • DDL statement couldn’t be parsed mismatched input '`encrypted` DBZ-4661

    • debezium-examples fail when using confluentinc/cp-schema-registry:7.0.0 DBZ-4666

    • DDL parsing exception DBZ-4675

    • JdbcConnection#executeWithoutCommitting commits when auto-commit is enabled DBZ-4701

    • OracleSchemaMigrationIT fails with Xstream adapter DBZ-4703

    • Cannot expand JSON payload with nested arrays of objects DBZ-4704

    Other changes

    • Possible performance issue after Debezium 1.6.1 upgrade (from 1.5) DBZ-3872

    • Upgrade Jenkins and Introduce JCasC to jnovotny DBZ-3980

    • Random test failure - ZZZGtidSetIT#shouldProcessPurgedGtidSet DBZ-4294

    • Verify compatibility with Oracle 21c (21.3.0.0.0) DBZ-4305

    • Add metadata to OracleConnectorConfig for Debezium UI DBZ-4314

    • Release pipeline should check existence of GA version DBZ-4623

    • Release pipeline - conditionalize and parameterize backport check DBZ-4624

    • Migrating UI from webpack-dev-server v3 to v4 DBZ-4642

    • Don’t run checkstyle/dependency check on documentation-only pull requests or commits DBZ-4645

    • Cron-based Github Action to notify documentation changes in last x days DBZ-4653

    • Oracle DDL parser failure with supplemental log group clause with a custom name DBZ-4654

    • Build MCS container images for Debezium 1.9.0.Alpha1 and deploy to RHOAS quay container registry DBZ-4656

    • Upgrade postgres driver to version 42.3.2 DBZ-4658

    • Make sure right protoc version is applied DBZ-4668

    • Build trigger issues DBZ-4672

    • MongoUtilIT test failure - unable to connect to primary DBZ-4676

    • Upgrade to Quarkus 2.7.0.Final DBZ-4677

    • Update shared UG deployment file for use with downstream OCP Install Guide DBZ-4700

    • Indicate ROWID is not supported by XStream DBZ-4702

    Release 1.9.0.Alpha1 (January 26th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 1.9.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 1.9.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 1.9.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Support for the wal2json logical decoding plug-in, as used by the Debezium Postgres connector, has been deprecated. All users should move to the pgoutput or decoderbufs plug-ins. The wal2json plug-in is scheduled for removal in Debezium 2.0 (DBZ-3953).
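
    For Postgres connectors still configured for wal2json, the migration amounts to switching the logical decoding plug-in; a minimal sketch of the affected property, assuming an otherwise unchanged connector configuration:

        # before (deprecated; scheduled for removal in Debezium 2.0)
        plugin.name=wal2json

        # after: use the pgoutput (or decoderbufs) logical decoding plug-in
        plugin.name=pgoutput

    Note that a logical replication slot is tied to the plug-in it was created with, so switching plug-ins generally means creating a new replication slot.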

    There were changes to the format of the Infinispan cache to support the storage of certain large object (LOB) events. If you use the Infinispan buffer cache implementation and have enabled LOB support, the cache files must be deleted and a new snapshot taken because of these compatibility changes (DBZ-4366).
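
    For orientation, this note only applies to Oracle connector configurations along these lines (the values are illustrative, and the exact buffer type name depends on your setup):

        # Oracle connector using an Infinispan-backed event buffer with LOB support enabled
        log.mining.buffer.type=infinispan_embedded
        lob.enabled=true

    If both settings are in use, remove the existing Infinispan cache files before starting the 1.9 connector and take a new snapshot.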

    The Debezium container images for Apache Kafka and Kafka Connect contain the log4j 1.x library, which is a runtime dependency of Kafka (it is not used in any way by Debezium). Several vulnerabilities were recently reported against some classes contained in that library. While these classes are used by neither Kafka (Connect) nor Debezium, the class files org/apache/log4j/net/JMSSink.class, org/apache/log4j/jdbc/, and /org/apache/log4j/chainsaw/ have been removed from the log4j 1.x JAR shipped with these container images as a precaution. If you actually need these classes, you should obtain the original log4j 1.x JAR and add it via a custom image derived from the Debezium ones. We advise against doing this, though, given the aforementioned vulnerabilities (DBZ-4568).

    New features

    • Debezium MySQL connector encounter latency in large DML of MySQL DBZ-3477

    • Add create/update/delete event seen metrics for monitor upstream dml operation DBZ-4351

    • Allow additional config options for Debezium Server Pubsub Connector DBZ-4375

    • Allow adhoc snapshots using signals in Oracle versions prior to 12c DBZ-4404

    • Fail MongoDB start when oplog is used for MongoDB 5+ DBZ-4415

    • Deprecated TruncateHandlingMode config property in favor of skipped_operations DBZ-4419

    • Introduce interfaces and default implementations for change event source metrics DBZ-4459

    • Create a Debezium schema generator for Debezium connectors (follow-up work) DBZ-4460

    • Make connector task partition readability for logs DBZ-4472

    • Remove unused brackets in MySqlParser DBZ-4473

    • Document DB permissions for Oracle Connector DBZ-4494

    • Add support for extra gRPC headers in Vitess connector DBZ-4532

    • Mining session stopped due to 'No more data to read from socket' DBZ-4536

    • A failure to register JMX metrics should fail the connector DBZ-4541

    • Debezium Engine should use topic names for conversion DBZ-4566

    • Allow user to define custom retriable message DBZ-4577

    • Implement Renovate to fix legacy-peer-deps issue with npm DBZ-4585

    • Typo in connect README DBZ-4589

    • Unsupported column type 'ROWID' error DBZ-4595

    • Cleanup project management in testsuite job DBZ-4602

    Fixes

    • NPE on PostgreSQL Domain Array DBZ-3657

    • MysqlSourceConnector issue with latin1 tables DBZ-3700

    • JSON Payload not expanding when enabling it DBZ-4457

    • Kafka Connect REST extension cannot be built with 1.9 DBZ-4465

    • DDL statement couldn’t be parsed DBZ-4485

    • Parse multiple signed/unsigned keyword from ddl statement failed DBZ-4497

    • Set the correct binlog serverId & threadId DBZ-4500

    • Null out query in read-only incremental snapshot DBZ-4501

    • R/O incremental snapshot can block the binlog stream on restart DBZ-4502

    • Drop the primary key column getting exception DBZ-4503

    • [MySQL Debezium] DDL Parsing error - curdate() & cast() DBZ-4504

    • Extra file checker-qual in PostgreSQL package DBZ-4507

    • website-builder image is not buildable DBZ-4508

    • Job for creating gold image not reading credentials correctly DBZ-4516

    • Replication stream retries are not configured correctly DBZ-4517

    • Add backend errors among retriable for Postgres connector DBZ-4520

    • Infinispan doesn’t work with underscores inside cache names DBZ-4526

    • Connector list should update immediately when a connector is deleted DBZ-4538

    • Mongo filters page show nulls in namespace name DBZ-4540

    • LogMinerHelperIT fails when running Oracle CI with a fresh database DBZ-4542

    • Oracle-Connector fails parsing a DDL statement (VIRTUAL keyword) DBZ-4546

    • DatabaseVersionResolver comparison logic skips tests unintentionally DBZ-4548

    • io.debezium.text.ParsingException when column name is 'seq' DBZ-4553

    • MySQL FLUSH TABLE[S] with empty table list not handled DBZ-4561

    • Debezium apicurio version is not aligned with Quarkus DBZ-4565

    • Oracle built-in schema exclusions should also apply to DDL changes DBZ-4567

    • mongo-source-connector config database.include.list does not work DBZ-4575

    • Can’t process column definition with length exceeding Integer.MAX_VALUE DBZ-4583

    • Oracle connector can’t find the SCN DBZ-4597

    Other changes

    • Set up CI for Oracle DBZ-732

    • Migrate logger used for tests to Logback DBZ-2224

    • Update downstream docs in regards to deprecated elements DBZ-3881

    • Broken links to the Transaction metadata topics from descriptions for provide.transaction.metadata property DBZ-3997

    • Add script to check for missing backports DBZ-4063

    • Protect release from using invalid version name DBZ-4072

    • Upgrade to Quarkus 2.6.2.Final DBZ-4117

    • Use Postgres 10 by default DBZ-4131

    • Give debezium-builder user privileges to access internal issues DBZ-4271

    • Point to supported versions in connector pages DBZ-4300

    • Allow for additional custom columns in an outbox table DBZ-4317

    • Log problematic values if they cannot be processed DBZ-4371

    • Run Jenkins CI on weekends too DBZ-4373

    • Update Postgres JDBC driver to 42.3.1 DBZ-4374

    • Release pipeline should use Jira API token DBZ-4383

    • Remove log.mining.log.file.query.max.retries configuration property DBZ-4408

    • Add Debezium Server example using Postgres and Pub/Sub DBZ-4438

    • Document Outbox SMT behaviour with postgres bytea_output = escape DBZ-4461

    • Run formatting check in the same connector/module workflows DBZ-4462

    • Upgrade SQL Server driver to 9.4 DBZ-4463

    • Add snapshot repository to Vitess connector DBZ-4464

    • REST extension tests must not depend on source code version DBZ-4466

    • snapshotPreceededBySchemaChange should not be tested for Db2 DBZ-4467

    • Debezium Server workflow should build PG connector without tests DBZ-4468

    • PostgresShutdownIT must not depend on Postgres version DBZ-4469

    • Updating jenkins job creating image snapshots DBZ-4486

    • Set jenkins jobs to store last 10 builds DBZ-4506

    • Provide a script to generate release notes section DBZ-4513

    • Remove INTERNAL_KEY_CONVERTER and INTERNAL_VALUE_CONVERTER env vars DBZ-4514

    • Bump protobuf version to the latest 3.x DBZ-4527

    • Document automatic log-switch setting for low-frequency change systems DBZ-4528

    • Organize properties of Db2 connector DBZ-4537

    • Update release procedure to cover required documentation config changes DBZ-4539

    • Module debezium-testing-testcontainers tests are not executed DBZ-4544

    • Check Debezium user logging after auth change DBZ-4545

    • Fix links to connector incremental snapshots topic DBZ-4552

    • Vitess connector image cannot be built DBZ-4559

    • Reduce GitHub action build times with formatting DBZ-4562

    • Doc updates to address downstream build issues DBZ-4563

    • Upgrade Avro converter to 7.0.1 and Apicurio to 2.1.5.Final DBZ-4569

    • Older degree of parallelism DDL syntax causes parsing exception DBZ-4571

    • Conditionalize note about outbox event router incompatibility DBZ-4573

    • Update description of snapshot.mode in postgresql.adoc DBZ-4574

    • Avoid build warning about maven-filtering missing plugin descriptor DBZ-4580

    • Fix build failure when xstream missing when building the micro benchmark for Oracle DBZ-4581

    • Update shared UG deployment file to clarify that connectors can use existing KC instance DBZ-4582

    • Test Failure - RecordsStreamProducerIT DBZ-4592

    • Upgrade Kafka to 3.1.0 DBZ-4610

    • Server transformation properties should refer to "type" rather than "class" DBZ-4613

    \ No newline at end of file diff --git a/releases/2.0/index.html b/releases/2.0/index.html index aec186a043..edcfa8174e 100644 --- a/releases/2.0/index.html +++ b/releases/2.0/index.html @@ -1 +1 @@ - Debezium Release Series 2.0

    \ No newline at end of file + Debezium Release Series 2.0

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.29
    MongoDB Database: 4.0, 4.2, 4.4, 5.0, 6.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.4.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0, 21.3.0.0, 21.4.0.0, 21.5.0.0, 21.6.0.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.0.0.CR1

    2022-10-07

    2.0.0.Beta2

    2022-09-16
    New parameter naming schema; All schemas properly named; Connector restarted by default; MySQL binlog compression supported; MongoDB connection string support; Cassandra 4 connector processes incremental commit log changes; Pause/resume of incremental snapshots; Custom SQL filter for incremental snapshots; Multiple tasks supported in Vitess connector
    \ No newline at end of file diff --git a/releases/2.0/release-notes.html b/releases/2.0/release-notes.html index c5b15d38b1..5c86b3f9e0 100644 --- a/releases/2.0/release-notes.html +++ b/releases/2.0/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 2.0

    Release Notes for Debezium 2.0

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.0.1.Final (December 7th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Reduce container image sizes by consolidating operations per layer DBZ-5864

    Fixes

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • ORA-01003: no statement parsed DBZ-5352

    • Missing snapshot pending transactions DBZ-5482

    • Db2 documentation refers to invalid SMALLMONEY and MONEY data types DBZ-5504

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • Oracle SQL parsing error when collation used DBZ-5726

    • Unparseable DDL statement DBZ-5734

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • More Oracle logging DBZ-5759

    • Oracle should only log row contents at TRACE level DBZ-5760

    • Outbox Router documentation outdated regarding value converter DBZ-5770

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • PostgreSQL missing metadata info DBZ-5789

    • Oracle connector does not attempt restart when ORA-01089 exception is nested DBZ-5791

    • Message with LSN 'LSN{XYZ}' not present among LSNs seen in the location phase DBZ-5792

    • Mysql connector alter table with database name parse failed DBZ-5802

    • Conflicting documentation for snapshot.mode property in MongoDB connector v2.0 DBZ-5812

    • 'topic.prefix' default value in MongoDB connector v2.0 DBZ-5817

    • Quarkus outbox extension never finishes the open tracing span DBZ-5821

    • fix names of range fields in schema to comply with Avro standard DBZ-5826

    • CREATE/ALTER user does not support COMMENT token DBZ-5836

    • Invalid Java object for schema with type FLOAT64: class java.lang.Float DBZ-5843

    • IllegalStateException is thrown if task is recovering while other tasks are running DBZ-5855

    • CREATE/ALTER user does not support ATTRIBUTE token DBZ-5876

    Other changes

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    • Upgrade to Quarkus 2.14.CR1 DBZ-5774

    • Upgrade postgres driver to version 42.5.0 DBZ-5780

    • Upgrade to Quarkus 2.14.0.Final DBZ-5786

    • Doc Typo in cloudevents DBZ-5788

    • Add ORA-01555 to Oracle documentation DBZ-5816

    • GitHub Actions: Deprecating save-state and set-output commands DBZ-5824

    • Upgrade wildfly-elytron to 1.15.5 / 1.16.1 due to CVE-2021-3642 DBZ-5854

    Release 2.0.0.Final (October 14th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Final plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The PostgreSQL transaction id is a 32-bit integer that eventually rolls over. To simplify deduplication of transactions, the LSN is now included as part of the transaction identifier (DBZ-5329).
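
    As an illustration, this only affects deployments that emit transaction metadata; the identifier format shown in the comment is illustrative of the new scheme rather than an exact specification:

        # Postgres connector with transaction metadata events enabled
        provide.transaction.metadata=true
        # transaction identifiers now combine the transaction id with the LSN
        # (roughly "<txId>:<lsn>" instead of just "<txId>"), so they stay unique across id roll-over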

    New features

    There are no new features in this release.

    Fixes

    • ORA-01289: cannot add duplicate logfile DBZ-5276

    • Function DATE_ADD can be used as an identifier DBZ-5679

    • MySqlConnector parse create view statement failed DBZ-5708

    • The DDL_FILTER of SchemaHistory doesn’t work for DDL statements that include line breaks DBZ-5709

    • Debezium Server 1.9.6 is using MSSQL JDBC 7.2.2 instead of 9.4.1 DBZ-5711

    • Invalid prop names in MongoDB outbox router docs DBZ-5715

    • tests are running forever DBZ-5718

    • cassandra connector first startup ever may fail DBZ-5719

    • Vitess: Handle Vstream error: unexpected server EOF DBZ-5722

    • ParsingException: DDL statement couldn’t be parsed (index hints) DBZ-5724

    Other changes

    • Remove whitelisted/blacklisted from log messages DBZ-5710

    • MySqlSchemaMigrationIT runs failed DBZ-5728

    Release 2.0.0.CR1 (October 7th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.CR1 plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The default behaviour for schema.name.adjustment.mode is now none. While avro was the safer option for users of the Avro converter, it was confusing in the more common case where the default JSON converter is used (DBZ-5541).
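
    Users who rely on the previous behaviour, typically because they use the Avro converter, can restore it explicitly; a minimal sketch of the relevant property:

        # new default is "none"; set back to "avro" if schema names must be valid Avro names
        schema.name.adjustment.mode=avro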

    New features

    • Implement retries for Debezium embedded engine DBZ-4629

    • MySqlErrorHandler should handle SocketException DBZ-5486

    • Traditional snapshot process setting source.ts_ms DBZ-5591

    • Clean up "logical name" config DBZ-5594

    • Upgrade Kafka client to 3.3.1 DBZ-5600

    • When writing docs, use website stylesheet for IDE preview in IntelliJ DBZ-5616

    • Support READ ONLY/ENCRYPTION options for alter database statement DBZ-5622

    • Clarify semantics of include/exclude options DBZ-5625

    • Added support for Mongo pre-image in change stream DBZ-5628

    • Support for setting stats_sample_pages=default in alter table statements DBZ-5631

    • support for using any expression in kill statements DBZ-5636

    • Logging enhancement for non-incremental snapshot in postgres connector DBZ-5639

    • Support set statement in mariadb DBZ-5650

    • Add Mongo-initiator 6.0 container image DBZ-5666

    • Remove logical name parameter from sub connector config DBZ-5671

    Fixes

    • ConvertingEngineBuilder loses the accents DBZ-4213

    • Debezium Db2 Connector fails to handle default values in schema when making the snapshot DBZ-4990

    • Debezium 2.0.0.Beta1 Azure SQL breaking change DBZ-5496

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Starting Embedded Engine swallows ClassNotFoundException so user cannot see why engine does not work DBZ-5583

    • Message with LSN foo larger than expected LSN bar DBZ-5597

    • Fix broken anchors in docs DBZ-5618

    • DDL Parsing Error DBZ-5623

    • MySQL connector cannot parse default value of decimal column enclosed in double quotes DBZ-5630

    • Support grant LOAD FROM S3, SELECT INTO S3, INVOKE LAMBDA with aws mysql DBZ-5633

    • Continuously WARNs about undo transactions when LOB is enabled DBZ-5635

    • Literal "${project.version}" in the source record instead of the actual version DBZ-5640

    • TABLE_TYPE keyword can be used as identifier DBZ-5643

    • Large numbers of ROLLBACK transactions can lead to memory leak when LOB is not enabled. DBZ-5645

    • Race in DebeziumContainer during startup DBZ-5651

    • Outbox pattern nested payload leads to connector crash DBZ-5654

    • Allow the word STATEMENT to be a table / column name DBZ-5662

    • ValidatePostgresConnectionIT.testInvalidPostgresConnection fails DBZ-5664

    • Hardcoded driver task properties are not being passed to underlying connections DBZ-5670

    • Keyword virtual can be used as an identifier DBZ-5674

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    Other changes

    • Align connector properties to have an empty default cell if property has no default DBZ-3327

    • Improve Filter SMT documentation / examples DBZ-4417

    • Test failure on CI: SqlServerConnectorIT#updatePrimaryKeyTwiceWithRestartInMiddleOfTx DBZ-4475

    • Intermittent test failure: SqlServerConnectorIT#updatePrimaryKeyWithRestartInMiddle() DBZ-4490

    • Edit content newly added to the MongoDB connector doc DBZ-5542

    • Upgrade apicurio to 2.2.5.Final DBZ-5549

    • Modify the Instantiator to not require classloader DBZ-5585

    • Use quay.io in test containers DBZ-5603

    • Remove records from being logged at all levels DBZ-5612

    • Upgrade binary log client to 0.27.2 DBZ-5620

    • Allow to change docker maven properties from command line DBZ-5657

    • Update docker maven plugin DBZ-5658

    • Run UI tests on all connector changes DBZ-5660

    • Cleanup UI e2e tests after removing default value for topic.prefix DBZ-5667

    Release 2.0.0.Beta2 (September 16th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.1 and has been tested with version 3.2.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Beta2 plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Object sizes for memory queue limits are no longer calculated using reflection but are estimated based on the message schema. This is not supported for the Cassandra connector (DBZ-2766).
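
    For reference, the estimation applies to the existing queue sizing options; a minimal sketch with illustrative values:

        max.queue.size=8192
        # when set to a positive value, the queue is additionally bounded by the (now schema-estimated) size in bytes
        max.queue.size.in.bytes=134217728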

    All schemas used by Debezium are now defined in a central point, properly named and versioned (DBZ-4365, DBZ-5044). This can lead to schema compatibility issues if a schema registry is used.

    The connector parameter naming was overhauled and separated into distinct trees. The connector configuration must be updated (DBZ-5043).
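
    A couple of representative renames as a sketch (the values are placeholders; consult the connector documentation for the complete mapping):

        # 1.x name                      ->  2.0 name
        # database.server.name          ->  topic.prefix
        # database.history.kafka.topic  ->  schema.history.internal.kafka.topic
        topic.prefix=inventory-server
        schema.history.internal.kafka.topic=schema-changes.inventory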

    Most Debezium connectors now restart by default when an exception related to communication (SqlException, IOException) is thrown (DBZ-5244).

    The skipped.operations configuration option now defaults to t, so truncate events are skipped by default (DBZ-5497).
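
    Users who still need truncate events delivered can opt back in explicitly; a minimal sketch:

        # default is now "t" (skip truncates); "none" emits all operation types again
        skipped.operations=none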

    Java 17 is no longer supported for writing tests. As some of the developer tools have issues when a different Java level is used for main and test code, Debezium now uses Java 11 for the whole codebase (DBZ-5568).

    New features

    • Support binlog compression for MySQL DBZ-2663

    • Limit log output for "Streaming requested from LSN" warnings DBZ-3007

    • Redis Sink - Change the format of the message sent to the stream DBZ-4441

    • Debezium UI frontend should use new URLs and new JSON schema descriptors DBZ-4619

    • Provide a signal to pause/resume a running incremental snapshot DBZ-4727

    • support mongodb connection string as configuration option DBZ-4733

    • Update Readme on github for Cassandra 4.x support DBZ-4839

    • Debezium Server verifies existence and format of the config file DBZ-5116

    • Include Oracle Debezium Connector in Debezium Server distribution DBZ-5122

    • Smart Backfills | Ability to backfill selective data DBZ-5327

    • Support multiple tasks in vitess connector DBZ-5382

    • Enhancing Cassandra 4 Connector to read incremental changes and not wait for Commit Log file to be marked complete DBZ-5410

    • Unsupported non-relational tables should be gracefully skipped by the connector during streaming DBZ-5441

    • Support incremental snapshot stop-snapshot signal sourced from Kafka topic DBZ-5453

    • Upgrade Kafka client to 3.2.1 DBZ-5463

    • Restart SQL Server task on "Socket closed" exception DBZ-5478

    • Augment a uniqueness key field/value in regex topic naming strategy DBZ-5480

    • Support wait/nowait clause in mariadb DBZ-5485

    • Adapt create function syntax of mariadb DBZ-5487

    • add schema doc from column comments DBZ-5489

    • MySQL connector fails to parse MariaDB sequence-related statements DBZ-5505

    • Expose default values and enum values in schema history messages DBZ-5511

    • Simplify passing of SINK config properties to OffsetBackingStore DBZ-5513

    • Support BASE64_URL_SAFE in BinaryHandlingMode DBZ-5544

    • Handle Vstream Connection reset DBZ-5551

    • Supply partition when committing offsets with source database DBZ-5557

    • Vitess: Filter table.include.list during VStream subscription DBZ-5572

    • Improve documentation editing experience by setting attributes for the preview DBZ-5576

    Fixes

    • Source info of incremental snapshot events exports wrong data DBZ-4329

    • "No maximum LSN recorded" log message can be spammed on low-activity databases DBZ-4631

    • Redis Sink config properties are not passed to DB history DBZ-5035

    • HTTP sink not retrying failing requests DBZ-5307

    • Translation from mongodb document to kafka connect schema fails when nested arrays contain no elements DBZ-5434

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Typo in postgresql document. DBZ-5450

    • Unit test fails on Windows DBZ-5452

    • Missing the regex properties validation before start connector of DefaultRegexTopicNamingStrategy DBZ-5471

    • Create Index DDL fails to parse when using TABLESPACE clause with quoted identifier DBZ-5472

    • Outbox doesn’t check array consistency properly when it determines its schema DBZ-5475

    • Misleading statistics written to the log DBZ-5476

    • Debezium connector task didn’t retry when failover in mongodb 5 DBZ-5479

    • ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal randomly fails DBZ-5483

    • Better error reporting for signal table failures DBZ-5484

    • Oracle DATADUMP DDL cannot be parsed DBZ-5488

    • MySQL connector fails to parse DDL statements that include the keyword "buckets" DBZ-5499

    • duplicate call to config.validateAndRecord() in RedisDatabaseHistory DBZ-5506

    • DDL statement couldn’t be parsed : mismatched input 'ENGINE' DBZ-5508

    • Use “database.dbnames” in SQL Server docs DBZ-5516

    • LogMiner DML parser incorrectly interprets concatenation operator inside quoted column value DBZ-5521

    • Mysql Connector DDL Parser does not parse all privileges DBZ-5522

    • SQL Server random test failures - EventProcessingFailureHandlingIT DBZ-5525

    • CREATE TABLE with JSON-based CHECK constraint clause causes MultipleParsingExceptions DBZ-5526

    • SQL Server test failure - verifyOffsets DBZ-5527

    • Unit test fails on Windows DBZ-5533

    • EmbeddedEngine should initialize Connector using SourceConnectorContext DBZ-5534

    • Unclear validation error when required field is missing DBZ-5538

    • Testsuite is missing server.id in MySQL connector’s configuration DBZ-5539

    • Support EMPTY column identifier DBZ-5550

    • Testsuite doesn’t reflect changes to SQLServer connector DBZ-5554

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • max.queue.size.in.bytes is invalid DBZ-5569

    • Language type for listings in automatic topic creation DBZ-5573

    • Vitess: Handle unexpected VStream close DBZ-5579

    • Unreliable RedisDatabaseHistoryIT DBZ-5582

    • Error when parsing alter sql DBZ-5587

    • Field validation errors are misleading for positive, non-zero expectations DBZ-5588

    • MySQL connector can’t handle case sensitivity of rename/change column statements DBZ-5589

    • LIST_VALUE_CLAUSE not allowing TIMESTAMP LITERAL DBZ-5592

    • Oracle DDL does not support comments on materialized views DBZ-5595

    • Oracle DDL does not support DEFAULT ON NULL DBZ-5605

    • Datatype mdsys.sdo_geometry not supported DBZ-5609

    Other changes

    • Add signal table automatically to include list DBZ-3293

    • No documentation for snapshot.include.collection.list property for Db2 connector DBZ-4345

    • Deprecate internal key/value converter options DBZ-4617

    • Run system testsuite inside OpenShift DBZ-5165

    • Upgrade SQL Server driver to 10.2.1.jre8 DBZ-5290

    • Rewrite oracle tests pipeline job to matrix job DBZ-5412

    • Debezium on ROSA sanity testing DBZ-5416

    • Update link format in shared tutorial file DBZ-5422

    • Deprecate legacy topic selector for all connectors DBZ-5457

    • Remove community conditionalization in signaling doc for Oracle incremental and ad hoc snapshots content DBZ-5458

    • Remove the dependency of JdbcConnection on DatabaseSchema DBZ-5470

    • Remove SQL Server SourceTimestampMode DBZ-5477

    • Maintenance branch builds on connector repos should build against proper branch DBZ-5492

    • Upgrade PostgreSQL driver to 42.4.1 DBZ-5493

    • Force updating snapshots when building the UI in the workflow DBZ-5501

    • Restrict connector workflows based on individual grammar changes in DDL module DBZ-5528

    • Disable preferring DDL before logical schema in history recovery DBZ-5535

    • Disable Eager loading for federated module bundles. DBZ-5545

    • Missing format value option in debezium-server doc DBZ-5546

    • Debezium inputs with number types have the wrong name of the input DBZ-5553

    • MySQL read.only property incorrectly appears in downstream documentation DBZ-5555

    • Add the Fed module running script and update readme DBZ-5560

    • Logging improvements in TestSuite DBZ-5563

    • Formatting characters in properties tables rendered in published content DBZ-5565

    • Upgrade mysql-binlog-connector-java library version DBZ-5574

    • MySQL database.server.id indicates default value is random but that no longer applies DBZ-5577

    • Switch test containers to Debezium nightly DBZ-5601

    • GitHub CI fails for DB2 connector DBZ-5606

    • ValidateSqlServerFiltersIT fails in CI DBZ-5613

    Release 2.0.0.Beta1 (July 26th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium previously exposed connector metrics as a single tuple of snapshot, streaming, and history-based beans. With this release, connector metrics have migrated to a multi-partition scheme, which means that the naming of the metrics and how they are exposed have changed as part of DBZ-4726. Please be sure to review your metrics gathering processes if you’re using tools like Grafana, Prometheus, or other JMX metrics gathering frameworks.

    Debezium previously provided support for reading and storing offsets, history, and other bits as a part of the debezium-core module. With this release, we’ve introduced a new module called debezium-storage with implementations for local file and Kafka based storage approaches (DBZ-5229). This approach provides a wonderful extension point going forward to introduce other storage implementations as the need arises. When upgrading, you may need to adjust your application’s dependencies depending on which storage implementations your code uses.
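
    For embedded engine or Debezium Server users, storage implementations are selected via configuration roughly as sketched below; the class and property names reflect the new debezium-storage modules and should be treated as assumptions to verify against the documentation for your exact version:

        # offsets stored on the local file system (Kafka Connect runtime class)
        offset.storage=org.apache.kafka.connect.storage.FileOffsetBackingStore
        offset.storage.file.filename=/var/lib/debezium/offsets.dat
        # schema/database history backed by the file implementation from debezium-storage (assumed names)
        schema.history.internal=io.debezium.storage.file.history.FileSchemaHistory
        schema.history.internal.file.filename=/var/lib/debezium/schema-history.dat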

    New features

    • Pluggable topic selector DBZ-4180

    • Read Debezium Metrics From Debezium Server Consumer DBZ-5235

    • Treat SQLServerException with "Broken pipe (Write failed)" exception message as a retriable exception DBZ-5292

    • Include user that committed change in metadata (oracle) DBZ-5358

    • UI Add debezium-ui i18n zh translation DBZ-5379

    • Support storing extended attributes in relational model and JSON schema history topic DBZ-5396

    • Validate topic naming strategy relative topic name properties DBZ-5414

    • Verify the unique index whether including function or arbitrary expression DBZ-5424

    • Remove the duplicated SimpleDdlParserListener from mysql connector DBZ-5425

    Fixes

    • MongoConnector’s field exclusion configuration does not work with fields with the same name but from different collections DBZ-4846

    • User inputs are not consistent on the Filter step for the DBZ connectors DBZ-5246

    • KafkaDatabaseHistory does not check the database history topic creation result, causing UnknownTopicOrPartitionException DBZ-5249

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • Caused by: java.io.EOFException: Failed to read next byte from position 2005308603 DBZ-5333

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • Oracle Xstream does not propagate commit timestamp to transaction metadata DBZ-5373

    • UI View connector config in non-first cluster return 404 DBZ-5378

    • CommitScn not logged in expected format DBZ-5381

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Missing "previousId" property with parsing the rename statement in kafka history topic DBZ-5386

    • Check constraint introduces a column based on constraint in the schema change event. DBZ-5390

    • The column is referenced as PRIMARY KEY, but a matching column is not defined in table DBZ-5398

    • Clarify which database name to use for signal.data.collection when using Oracle with pluggable database support DBZ-5399

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • Upgrade to Kafka 3.1 broke build compatibility with Kafka 2.x and Kafka 3.0 DBZ-5404

    • PostgresConnectorIT#shouldRecoverFromRetriableException fails randomly DBZ-5408

    Other changes

    • Clean-up unused documentation variables DBZ-2595

    • Intermittent test failures on CI: EventProcessingFailureHandlingIT DBZ-4004

    • Clarify whether SQL Server on Azure is a supported configuration or not DBZ-4312

    • Remove redundant setting of last events DBZ-5047

    • Rename docker-images repository and JIRA component to container-images DBZ-5048

    • Update instructions for deploying Debezium on RHEL (downstream-only change) DBZ-5293

    • Add ts_ms field to examples of transaction boundary events and examples and update property description in documentation DBZ-5334

    • Oracle GitHub actions workflow no longer run tests on pushes DBZ-5349

    • Unify job names in jenkins system-tests DBZ-5392

    • Build stable branches for connector-specific repos DBZ-5409

    • Oracle non-cdb builds do not use the correct environment settings DBZ-5411

    • Update the topic naming strategy doc to all connectors DBZ-5413

    • Address User guide review comments for Oracle connector DBZ-5418

    • OracleSchemaMigrationIT fails on non-pluggable (non-CDB) databases DBZ-5419

    Release 2.0.0.Alpha3 (July 1st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha3 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha3 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Mysql Commit Timestamp DBZ-5170

    • Include event scn in Oracle records DBZ-5225

    • Redis Store does not work with GCP Managed Redis DBZ-5268

    Fixes

    • Incorrect loading of LSN from offsets DBZ-3942

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • BigDecimal has mismatching scale value for given Decimal schema DBZ-4890

    • Debezium has never found starting LSN DBZ-5031

    • Data duplication problem using postgresql source on debezium server DBZ-5070

    • Cursor fetch is used for all results during connection DBZ-5084

    • Debezium connector fails at parsing select statement overrides when table name has space DBZ-5198

    • DDL statement couldn’t be parsed 2 - Oracle connector 1.9.3.Final DBZ-5230

    • Debezium server duplicates scripting jar files DBZ-5232

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle unparsable ddl create table DBZ-5237

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Character set influencers are not properly parsed on default values DBZ-5241

    • Duplicate SCNs on Oracle RAC installations incorrectly processed DBZ-5245

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • Invalid date 'SEPTEMBER 31' DBZ-5267

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5271

    • Deadlock during snapshot with Mongo connector DBZ-5272

    • Mysql parser is not able to handle variables in KILL command DBZ-5273

    • Debezium Server fails when connecting to Azure Event Hubs DBZ-5279

    • ORA-01086 savepoint never established raised when database history topic cannot be created or does not exist DBZ-5281

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    Other changes

    • Add script SMT test case to OCP test suite DBZ-2581

    • Confusing example for schema change topic DBZ-4713

    • Update cache-invalidation example DBZ-4754

    • Switch from static yaml descriptors to dynamic objects DBZ-4830

    • Verify that snapshot deployments build and deploy javadocs DBZ-4875

    • DelayStrategy should accept Duration rather than long ms DBZ-4902

    • Use maven 3.8.4 version with enforcer plugin DBZ-5069

    • Add option for '*' wildcard usage in testsuite preparation Jenkins jobs DBZ-5190

    • Use the Maven wrapper in the Github and Jenkins workflows DBZ-5207

    • Improve performance of OracleConnectorIT shouldIgnoreAllTablesInExcludedSchemas test DBZ-5226

    • Document use of JAR artifact to build Debezium scripting SMT into Kafka Connect DBZ-5227

    • Create shared adoc fragments for specifying MBean name format in connector metrics sections DBZ-5233

    • Build Oracle connector by default without Maven profiles DBZ-5234

    • Remove reference to removed case insensitive option in Oracle README.md DBZ-5250

    • Several Oracle tests do not get database name from TestHelper DBZ-5258

    • Upgrade to Quarkus 2.10.0.Final DBZ-5259

    • Upgrade PostgreSQL driver to 42.4.0 DBZ-5261

    • Refactor ChangeEventQueue to better support n:1 threads DBZ-5277

    • Upgrade MongoDB driver to 4.6.1 DBZ-5287

    Release 2.0.0.Alpha2 (June 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium MySQL connector represented FLOAT datatype as FLOAT64. Now the correct type FLOAT32 is used (DBZ-3865).

    MongoDB connector no longer supports streaming from oplog. The connector fully switched to change streams streaming (DBZ-4951).

    All connectors now use multi-partitioned codebase. This has impact only on project developers (DBZ-5042).

    All deprecated configuration options were removed (DBZ-5045).

    New features

    • Provide a signal to stop the running incremental snapshot DBZ-4251

    • SQL Server - Fail connector when a user doesn’t have the right permission (CDCReader) DBZ-4346

    • Allow mongodb-connector to decode Binary payloads DBZ-4600

    • Add UI backend tests for SQL Server connector DBZ-4867

    • direct usage of debezium engine ignores ChangeConsumer.supportsTombstoneEvents DBZ-5052

    • Config the cache size property for ByLogicalTableRouter caches DBZ-5072

    • Introduce a new extension api for query debezium version DBZ-5092

    • Introduce a new field "ts_ms" to identify the process time for schema change event DBZ-5098

    • MongoDB Connector should use RawBsonDocument instead of Document DBZ-5113

    Fixes

    • Postgres existing publication is not updated with the new table DBZ-3921

    • Error and connector stops when DDL contains lateral DBZ-4780

    • Schema changes should flush SCN to offsets if there are no other active transactions DBZ-4782

    • Connector stops streaming after a re-balance DBZ-4792

    • MySQL connector incremental snapshot fails to parse the datetime column length when the connector sets "snapshot.fetch.size": 20000 DBZ-4939

    • [MySQL Debezium] DDL Parsing error - CREATE OR REPLACE TABLE DBZ-4958

    • InstanceAlreadyExistsException during MongoDb connector metrics registration DBZ-5011

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • PostgreSQL ENUM default values are missing from generated schema DBZ-5038

    • Debezium official documentation typo DBZ-5040

    • Fix inconsistent transaction id when handling transactional messages in Vitess connector DBZ-5063

    • 4 Connections per connector (postgres) DBZ-5074

    • Oracle documentation refers to archive_log_target rather than archive_lag_target DBZ-5076

    • 'ALTER TABLE mytable DROP FOREIGN KEY IF EXISTS mytable_fk' no viable alternative at input 'ALTER TABLE mytable DROP FOREIGN KEY IF' DBZ-5077

    • Oracle Logminer: records missed during switch from snapshot to streaming mode DBZ-5085

    • Interrupting a snapshot process can hang for some JDBC drivers DBZ-5087

    • Debezium fails to undo change event due to transaction id ending in ffffffff with LogMiner DBZ-5090

    • Table changes are not filled in schema changes from snapshot DBZ-5096

    • PostgreSQL connector does not retry on some errors when Postgres is taken offline DBZ-5097

    • Parsing zero day fails DBZ-5099

    • Cannot Set debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm to empty value DBZ-5105

    • Debezium connector failed with create table statement DBZ-5108

    • Current version of surefire/failsafe skips tests on failure in BeforeAll DBZ-5112

    Other changes

    • Restructure documentation for custom converters DBZ-4588

    • Document xmin.fetch.interval.ms property for Postgres connector DBZ-4734

    • Update to Quarkus 2.9.2.Final DBZ-4806

    • Upgrade Oracle driver to 21.5.0.0 DBZ-4877

    • Execute Debezium UI build when core library is changed DBZ-4947

    • Remove unused Oracle connector code DBZ-4973

    • Links to cassandra 3 and 4 artifacts no longer work for Debezium 1.9+ DBZ-5055

    • Align Postgresql driver with Quarkus DBZ-5060

    • Outdated links in Javadoc documentation DBZ-5075

    • Rename "Mysql" to "MySql" in related MysqlFieldReader interface DBZ-5078

    • Create CI job for maven repository verification DBZ-5082

    • Remove database.server.id default value handler, no longer auto-generated. DBZ-5100

    • Upgrade Jackson Databind to 2.13.2.2 DBZ-5107

    • Switch to released version of Fixture5 extension in System testsuite DBZ-5114

    Release 2.0.0.Alpha1 (April 28th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Java 11 is required to run Debezium in any form: Kafka Connect plug-in, Debezium engine and Debezium Server (DBZ-4949).

    Maven 3.8.4 is required to build Debezium (DBZ-5064).

    PostgreSQL connector no longer supports old versions of protobuf decoding plug-in that do not have transaction and default value support (DBZ-703).

    PostgreSQL connector no longer supports the wal2json decoding plug-in (DBZ-4156). The pgoutput decoding plug-in is recommended as the replacement.

    Legacy implementation of MySQL connector was removed (DBZ-4950).

    Confluent Avro converters are no longer present in Debezium container images (DBZ-4952).

    JDBC legacy date/time properties support was removed from MySQL connector (DBZ-4965).

    New features

    • Implement Pub/Sub Lite change consumer DBZ-4450

    • Include Instant Client in Docker build for Oracle in Tutorial examples DBZ-1013

    • Add Google Pub/Sub emulator support DBZ-4491

    • Making Postgres PSQLException: This connection has been closed. retriable DBZ-4948

    • ORA-04030: out of process memory when trying to allocate 65568 bytes (Logminer LCR c,krvxrib:buffer) DBZ-4963

    • Should store event header timestamp in HistoryRecord DBZ-4998

    • DBZ-UI: In the Edit/Duplicate connector flow, make the access/secret key/password/Client Secret field editable. DBZ-5001

    • adjust LogMiner batch size based on comparison with currently used batch size DBZ-5005

    Fixes

    • Connector throws java.lang.ArrayIndexOutOfBoundsException DBZ-3848

    • Document that no relevant tables should be in the SYS or SYSTEM tablespaces. DBZ-4762

    • Getting java.sql.SQLException: ORA-01291: missing logfile while running with archive log only DBZ-4879

    • Debezium uses wrong LCR format for Oracle 12.1 DBZ-4932

    • Oracle duplicates on connector restart DBZ-4936

    • Oracle truncate causes exception DBZ-4953

    • NPE caused by io.debezium.connector.oracle.antlr.listener.ColumnDefinitionParserListener.resolveColumnDataType DBZ-4976

    • Oracle connector may throw NullPointerException when stopped after an unsuccessful startup DBZ-4978

    • NPE for non-table related DDLs DBZ-4979

    • CTE statements aren’t parsed by MySQL connector DBZ-4980

    • Missing SSL configuration option in the debezium mongodb connector UI DBZ-4981

    • Unsupported MySQL Charsets during Snapshotting for fields with custom converter DBZ-4983

    • Outbox Transform does not allow expanded payload with additional fields in the envelope DBZ-4989

    • Redis Sink - clientSetname is taking place before auth DBZ-4993

    • CLOB with single quotes causes parser exception DBZ-4994

    • Oracle DDL parser fails on references_clause with no column list DBZ-4996

    • Can’t use 'local' database through mongos DBZ-5003

    • Triggering Incremental Snapshot on MongoDB connector throws json parsing error DBZ-5015

    • Jenkins jobs fail to download debezium-bom DBZ-5017

    • Redis Sink - Check if client is not null before closing it DBZ-5019

    • Cassandra 3 handler does not process partition deletions correctly DBZ-5022

    • Keyspaces should be initialised in all schema change listeners on sessions startup. DBZ-5023

    • SQL Server in multi-partition mode fails if a new database is added to an existing configuration DBZ-5033

    • MySQL tests start before the MySQL DB container is running DBZ-5054

    • Debezium server configuration properties not rendered correctly DBZ-5058

    Other changes

    • Add integration test for Oracle database.url configurations DBZ-3318

    • Build Cassandra 3.x connector with Java 11 DBZ-4910

    • Add ignoreSnapshots build option to release pipeline DBZ-4957

    • Update Pulsar client version used by Debezium Server DBZ-4961

    • Intermittent failure of RedisStreamIT.testRedisConnectionRetry DBZ-4966

    • Add triggers for 2.x paths in Github CI DBZ-4971

    • Debezium raised an exception and the task was still running DBZ-4987

    • Nexus Staging Maven plugin is incompatible with OpenJDK 17 DBZ-5025

    • Duplicate definition of Maven plugins DBZ-5026

    • OracleOffsetContextTest should be scoped to LogMiner only DBZ-5028

    • Scope several new Oracle tests to LogMiner only DBZ-5029

    • Failure in jdk outreach jobs DBZ-5041

    • Update artifact server job listing script DBZ-5051

    • Add FAQ about ORA-01882 and Oracle 11 to documentation DBZ-5057

    • Upgrade to Quarkus 2.8.2.Final DBZ-5062

    \ No newline at end of file
    + Release Notes for Debezium 2.0

    Release Notes for Debezium 2.0

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.0.1.Final (December 7th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Reduce container image sizes by consolidating operations per layer DBZ-5864

    Fixes

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • ORA-01003: no statement parsed DBZ-5352

    • Missing snapshot pending transactions DBZ-5482

    • Db2 documentation refers to invalid SMALLMONEY and MONEY data types DBZ-5504

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • Oracle SQL parsing error when collation used DBZ-5726

    • Unparseable DDL statement DBZ-5734

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • More Oracle logging DBZ-5759

    • Oracle should only log row contents at TRACE level DBZ-5760

    • Outbox Router documentation outdated regarding value converter DBZ-5770

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • PostgreSQL missing metadata info DBZ-5789

    • Oracle connector does not attempt restart when ORA-01089 exception is nested DBZ-5791

    • Message with LSN 'LSN{XYZ}' not present among LSNs seen in the location phase DBZ-5792

    • MySQL connector fails to parse ALTER TABLE with a database name DBZ-5802

    • Conflicting documentation for snapshot.mode property in MongoDB connector v2.0 DBZ-5812

    • 'topic.prefix' default value in MongoDB connector v2.0 DBZ-5817

    • Quarkus outbox extension never finishes the OpenTracing span DBZ-5821

    • fix names of range fields in schema to comply with Avro standard DBZ-5826

    • CREATE/ALTER user does not support COMMENT token DBZ-5836

    • Invalid Java object for schema with type FLOAT64: class java.lang.Float DBZ-5843

    • IllegalStateException is thrown if task is recovering while other tasks are running DBZ-5855

    • CREATE/ALTER user does not support ATTRIBUTE token DBZ-5876

    Other changes

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    • Upgrade to Quarkus 2.14.CR1 DBZ-5774

    • Upgrade postgres driver to version 42.5.0 DBZ-5780

    • Upgrade to Quarkus 2.14.0.Final DBZ-5786

    • Doc Typo in cloudevents DBZ-5788

    • Add ORA-01555 to Oracle documentation DBZ-5816

    • GitHub Actions: Deprecating save-state and set-output commands DBZ-5824

    • Upgrade wildfly-elytron to 1.15.5 / 1.16.1 due to CVE-2021-3642 DBZ-5854

    Release 2.0.0.Final (October 14th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Final plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The PostgreSQL transaction id is a 32-bit integer and rolls over. To simplify deduplication of transactions, the LSN was added as part of the identifier (DBZ-5329).
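
    If downstream consumers deduplicate on the transaction id, they may need to split the new composite value. A minimal sketch, assuming the identifier concatenates the transaction id and the LSN with a colon (verify the exact format against the connector documentation):

        // Hypothetical helper for the assumed "<txId>:<lsn>" composite form.
        public final class PostgresTxIdParser {
            public static String txId(String compositeId) {
                int sep = compositeId.indexOf(':');
                return sep < 0 ? compositeId : compositeId.substring(0, sep);
            }
            public static String lsn(String compositeId) {
                int sep = compositeId.indexOf(':');
                return sep < 0 ? null : compositeId.substring(sep + 1);
            }
        }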

    New features

    There are no new features in this release.

    Fixes

    • ORA-01289: cannot add duplicate logfile DBZ-5276

    • Function DATE_ADD can be used as an identifier DBZ-5679

    • MySqlConnector parse create view statement failed DBZ-5708

    • The DDL_FILTER of SchemaHistory doesn’t work for including break lines ddl statement DBZ-5709

    • Debezium Server 1.9.6 is using MSSQL JDBC 7.2.2 instead of 9.4.1 DBZ-5711

    • Invalid prop names in MongoDB outbox router docs DBZ-5715

    • tests are running forever DBZ-5718

    • cassandra connector first startup ever may fail DBZ-5719

    • Vitess: Handle Vstream error: unexpected server EOF DBZ-5722

    • ParsingException: DDL statement couldn’t be parsed (index hints) DBZ-5724

    Other changes

    • Remove whitelisted/blacklisted from log messages DBZ-5710

    • MySqlSchemaMigrationIT runs failed DBZ-5728

    Release 2.0.0.CR1 (October 7th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.CR1 plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Default behaviour for schema.name.adjustment.mode is now none. While avro was the safer option when the Avro converter was in use, it was confusing in the more frequent case where the default JSON converter was used (DBZ-5541).
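
    Users who rely on the Avro converter can restore the previous behaviour explicitly. A minimal sketch of the relevant connector property, shown here with java.util.Properties (the rest of the connector configuration is omitted):

        import java.util.Properties;

        public class SchemaNameAdjustmentExample {
            public static Properties connectorProps() {
                Properties props = new Properties();
                // Restore the pre-CR1 behaviour when Avro serialization is in use.
                props.setProperty("schema.name.adjustment.mode", "avro");
                return props;
            }
        }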

    New features

    • Implement retries for Debezium embedded engine DBZ-4629

    • MySqlErrorHandler should handle SocketException DBZ-5486

    • Traditional snapshot process setting source.ts_ms DBZ-5591

    • Clean up "logical name" config DBZ-5594

    • Upgrade Kafka client to 3.3.1 DBZ-5600

    • When writing docs, use website stylesheet for IDE preview in IntelliJ DBZ-5616

    • Support READ ONLY/ENCRYPTION options for alter database statement DBZ-5622

    • Clarify semantics of include/exclude options DBZ-5625

    • Added support for Mongo pre-image in change stream DBZ-5628

    • Support for setting stats_sample_pages=default in alter table statements DBZ-5631

    • support for using any expression in kill statements DBZ-5636

    • Logging enhancement for non-incremental snapshot in postgres connector DBZ-5639

    • Support set statement in mariadb DBZ-5650

    • Add Mongo-initiator 6.0 container image DBZ-5666

    • Remove logic name parameter from sub connector config DBZ-5671

    Fixes

    • ConvertingEngineBuilder loses the accents DBZ-4213

    • Debezium Db2 Connector fails to handle default values in schema when is making the snapshot DBZ-4990

    • Debezium 2.0.0.Beta1 Azure SQL breaking change DBZ-5496

    • Oracle connector parsing SELECT_LOB_LOCATOR event missing constant unavailable.value.placeholder DBZ-5581

    • Starting Embedded Engine swallows ClassNotFoundException so user cannot see why engine does not work DBZ-5583

    • Message with LSN foo larger than expected LSN bar DBZ-5597

    • Fix broken anchors in docs DBZ-5618

    • DDL Parsing Error DBZ-5623

    • MySQL connector cannot parse default value of decimal column enclosed in double quotes DBZ-5630

    • Support grant LOAD FROM S3, SELECT INTO S3, INVOKE LAMBDA with aws mysql DBZ-5633

    • Continuously WARNs about undo transactions when LOB is enabled DBZ-5635

    • Literal "${project.version}" in the source record instead of the actual version DBZ-5640

    • TABLE_TYPE keyword can be used as identifier DBZ-5643

    • Large numbers of ROLLBACK transactions can lead to memory leak when LOB is not enabled. DBZ-5645

    • Race in DebeziumContainer during startup DBZ-5651

    • Outbox pattern nested payload leads to connector crash DBZ-5654

    • Allow the word STATEMENT to be a table / column name DBZ-5662

    • ValidatePostgresConnectionIT.testInvalidPostgresConnection fails DBZ-5664

    • Hardcoded driver task properties are not being passed to underlying connections DBZ-5670

    • Keyword virtual can be used as an identifier DBZ-5674

    • MongoDB Connector with DocumentDB errors with "{$natural: -1} is not supported" DBZ-5677

    Other changes

    • Align connector properties to have an empty default cell if property has no default DBZ-3327

    • Improve Filter SMT documentation / examples DBZ-4417

    • Test failure on CI: SqlServerConnectorIT#updatePrimaryKeyTwiceWithRestartInMiddleOfTx DBZ-4475

    • Intermittent test failure: SqlServerConnectorIT#updatePrimaryKeyWithRestartInMiddle() DBZ-4490

    • Edit content newly added to the MongoDB connector doc DBZ-5542

    • Upgrade apicurio to 2.2.5.Final DBZ-5549

    • Modify the Instantiator to not require classloader DBZ-5585

    • Use quay.io in test containers DBZ-5603

    • Remove records from being logged at all levels DBZ-5612

    • Upgrade binary log client to 0.27.2 DBZ-5620

    • Allow to change docker maven properties from command line DBZ-5657

    • Update docker maven plugin DBZ-5658

    • Run UI tests on all connector changes DBZ-5660

    • Cleanup UI e2e tests after removing default value for topic.prefix DBZ-5667

    Release 2.0.0.Beta2 (September 16th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.1 and has been tested with version 3.2.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Beta2 plugin files, and restart the connector using the new configuration parameter names. Upon restart, the 2.0.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Object sizes for memory queue limits are no longer calculated using reflection but estimated based on message schema. This is not supported for Cassandra connector (DBZ-2766).
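
    The options that bound the queue are unchanged; only the way the byte size is computed differs. A sketch of the related settings (values purely illustrative):

        import java.util.Properties;

        public class QueueSizingExample {
            public static Properties queueProps() {
                Properties props = new Properties();
                props.setProperty("max.queue.size", "8192");               // events held in the queue
                props.setProperty("max.batch.size", "2048");               // events per batch
                props.setProperty("max.queue.size.in.bytes", "134217728"); // ~128 MiB, now estimated from the message schema
                return props;
            }
        }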

    All schemas used by Debezium are now defined in a central point, properly named and versioned (DBZ-4365, DBZ-5044). This can lead to schema compatibility issues if a schema registry is used.

    The connector parameter naming was overhauled and separated into distinct trees. The connector configuration must be updated (DBZ-5043).
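
    The sketch below illustrates the kind of renames involved, using a few mappings that are believed to apply (for example database.server.name becoming topic.prefix, and database.history.* moving under schema.history.internal.*); treat it as illustrative and consult the migration notes for the authoritative list:

        import java.util.Properties;

        public class RenamedPropertiesExample {
            // Pre-2.0 style property names (illustrative).
            public static Properties before() {
                Properties p = new Properties();
                p.setProperty("database.server.name", "dbserver1");
                p.setProperty("database.history.kafka.bootstrap.servers", "kafka:9092");
                p.setProperty("database.history.kafka.topic", "schema-changes.inventory");
                return p;
            }

            // Equivalent 2.0-style property names (illustrative).
            public static Properties after() {
                Properties p = new Properties();
                p.setProperty("topic.prefix", "dbserver1");
                p.setProperty("schema.history.internal.kafka.bootstrap.servers", "kafka:9092");
                p.setProperty("schema.history.internal.kafka.topic", "schema-changes.inventory");
                return p;
            }
        }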

    Most Debezium connectors now restart by default when an exception related to communication (SqlException, IOException) is thrown (DBZ-5244).

    The skipped.operations configuration option now defaults to t (truncate), so truncate events are skipped by default (DBZ-5497).
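
    Deployments that still need truncate events can override the new default; a minimal sketch (none is assumed to remain a valid value, as in earlier releases):

        import java.util.Properties;

        public class SkippedOperationsExample {
            public static Properties connectorProps() {
                Properties props = new Properties();
                // Do not skip any operation types, including truncates.
                props.setProperty("skipped.operations", "none");
                return props;
            }
        }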

    Java 17 is no longer supported for writing tests. As some of the developer tools have issues when a different Java level is used for the main and test code, Debezium now uses Java 11 for the whole codebase (DBZ-5568).

    New features

    • Support binlog compression for MySQL DBZ-2663

    • Limit log output for "Streaming requested from LSN" warnings DBZ-3007

    • Redis Sink - Change the format of the message sent to the stream DBZ-4441

    • Debezium UI frontend should use new URLs and new JSON schema descriptors DBZ-4619

    • Provide a signal to pause/resume a running incremental snapshot DBZ-4727

    • support mongodb connection string as configuration option DBZ-4733

    • Update Readme on github for Cassandra 4.x support DBZ-4839

    • Debezium Server verifies existence and format of the config file DBZ-5116

    • Include Oracle Debezium Connector in Debezium Server distribution DBZ-5122

    • Smart Backfills | Ability to backfill selective data DBZ-5327

    • Support multiple tasks in vitess connector DBZ-5382

    • Enhancing Cassandra 4 Connector to read incremental changes and not wait for Commit Log file to be marked complete DBZ-5410

    • Unsupported non-relational tables should be gracefully skipped by the connector during streaming DBZ-5441

    • Support incremental snapshot stop-snapshot signal sourced from Kafka topic DBZ-5453

    • Upgrade Kafka client to 3.2.1 DBZ-5463

    • Restart SQL Server task on "Socket closed" exception DBZ-5478

    • Augment a uniqueness key field/value in regex topic naming strategy DBZ-5480

    • Support wait/nowait clause in mariadb DBZ-5485

    • Adapt create function syntax of mariadb DBZ-5487

    • add schema doc from column comments DBZ-5489

    • MySQL connector fails to parse MariaDB sequence-related statements DBZ-5505

    • Expose default values and enum values in schema history messages DBZ-5511

    • Simplify passing of SINK config properties to OffsetBackingStore DBZ-5513

    • Support BASE64_URL_SAFE in BinaryHandlingMode DBZ-5544

    • Handle Vstream Connection reset DBZ-5551

    • Supply partition when committing offsets with source database DBZ-5557

    • Vitess: Filter table.include.list during VStream subscription DBZ-5572

    • Improve documentation editing experience by setting attributes for the preview DBZ-5576

    Fixes

    • Source info of incremental snapshot events exports wrong data DBZ-4329

    • "No maximum LSN recorded" log message can be spammed on low-activity databases DBZ-4631

    • Redis Sink config properties are not passed to DB history DBZ-5035

    • HTTP sink not retrying failing requests DBZ-5307

    • Translation from mongodb document to kafka connect schema fails when nested arrays contain no elements DBZ-5434

    • Duplicate SCNs on same thread Oracle RAC mode incorrectly processed DBZ-5439

    • Typo in postgresql document. DBZ-5450

    • Unit test fails on Windows DBZ-5452

    • Missing the regex properties validation before start connector of DefaultRegexTopicNamingStrategy DBZ-5471

    • Create Index DDL fails to parse when using TABLESPACE clause with quoted identifier DBZ-5472

    • Outbox doesn’t check array consistency properly when it determines its schema DBZ-5475

    • Misleading statistics written to the log DBZ-5476

    • Debezium connector task didn’t retry when failover in mongodb 5 DBZ-5479

    • ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal randomly fails DBZ-5483

    • Better error reporting for signal table failures DBZ-5484

    • Oracle DATADUMP DDL cannot be parsed DBZ-5488

    • MySQL connector fails to parse the DDL statement when it includes the keyword "buckets" DBZ-5499

    • duplicate call to config.validateAndRecord() in RedisDatabaseHistory DBZ-5506

    • DDL statement couldn’t be parsed : mismatched input 'ENGINE' DBZ-5508

    • Use “database.dbnames” in SQL Server docs DBZ-5516

    • LogMiner DML parser incorrectly interprets concatenation operator inside quoted column value DBZ-5521

    • Mysql Connector DDL Parser does not parse all privileges DBZ-5522

    • SQL Server random test failures - EventProcessingFailureHandlingIT DBZ-5525

    • CREATE TABLE with JSON-based CHECK constraint clause causes MultipleParsingExceptions DBZ-5526

    • SQL Server test failure - verifyOffsets DBZ-5527

    • Unit test fails on Windows DBZ-5533

    • EmbeddedEngine should initialize Connector using SourceConnectorContext DBZ-5534

    • Unclear validation error when required field is missing DBZ-5538

    • Testsuite is missing server.id in MySQL connector’s configuration DBZ-5539

    • Support EMPTY column identifier DBZ-5550

    • Testsuite doesn’t reflect changes to SQLServer connector DBZ-5554

    • Use TCCL as the default classloader to load interface implementations DBZ-5561

    • max.queue.size.in.bytes is invalid DBZ-5569

    • Language type for listings in automatic topic creation DBZ-5573

    • Vitess: Handle VStream closing unexpectedly DBZ-5579

    • Unreliable RedisDatabaseHistoryIT DBZ-5582

    • Error when parsing alter sql DBZ-5587

    • Field validation errors are misleading for positive, non-zero expectations DBZ-5588

    • MySQL connector cannot handle the case sensitivity of rename/change column statements DBZ-5589

    • LIST_VALUE_CLAUSE not allowing TIMESTAMP LITERAL DBZ-5592

    • Oracle DDL does not support comments on materialized views DBZ-5595

    • Oracle DDL does not support DEFAULT ON NULL DBZ-5605

    • Datatype mdsys.sdo_geometry not supported DBZ-5609

    Other changes

    • Add signal table automatically to include list DBZ-3293

    • No documentation for snapshot.include.collection.list property for Db2 connector DBZ-4345

    • Deprecate internal key/value converter options DBZ-4617

    • Run system testsuite inside OpenShift DBZ-5165

    • Upgrade SQL Server driver to 10.2.1.jre8 DBZ-5290

    • Rewrite oracle tests pipeline job to matrix job DBZ-5412

    • Debezium on ROSA sanity testing DBZ-5416

    • Update link format in shared tutorial file DBZ-5422

    • Deprecate legacy topic selector for all connectors DBZ-5457

    • Remove community conditionalization in signaling doc for Oracle incremental and ad hoc snapshots content DBZ-5458

    • Remove the dependency of JdbcConnection on DatabaseSchema DBZ-5470

    • Remove SQL Server SourceTimestampMode DBZ-5477

    • Maintenance branch builds on connector repos should build against proper branch DBZ-5492

    • Upgrade PostgreSQL driver to 42.4.1 DBZ-5493

    • Force updating snapshots when building the UI in the workflow DBZ-5501

    • Restrict connector workflows based on individual grammar changes in DDL module DBZ-5528

    • Disable preferring DDL before logical schema in history recovery DBZ-5535

    • Disable Eager loading for federated module bundles. DBZ-5545

    • Missing format value option in debezium-server doc DBZ-5546

    • Debezium inputs with number types have the wrong name of the input DBZ-5553

    • MySQL read.only property incorrectly appears in downstream documentation DBZ-5555

    • Add the Fed module running script and update readme DBZ-5560

    • Logging improvements in TestSuite DBZ-5563

    • Formatting characters in properties tables rendered in published content DBZ-5565

    • Upgrade mysql-binlog-connector-java library version DBZ-5574

    • MySQL database.server.id indicates default value is random but that no longer applies DBZ-5577

    • Switch test containers to Debezium nightly DBZ-5601

    • GitHub CI fails for DB2 connector DBZ-5606

    • ValidateSqlServerFiltersIT fails in CI DBZ-5613

    Release 2.0.0.Beta1 (July 26th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium previously exposed connector metrics as a single tuple of snapshot, streaming, and history-based beans. With this release, connector metrics have migrated to a multi-partition scheme, which means that the naming and how metrics are exposed has changed as a part of DBZ-4726. Please be sure to review your metrics gathering processes if you’re using tools like Grafana, Prometheus, or other JMX metrics gathering frameworks.
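
    One way to audit which metric names your dashboards still match is to list every Debezium MBean after the upgrade. A sketch that queries the platform MBean server with a broad pattern rather than assuming the exact new attribute layout (it must run inside, or be pointed at, the Connect JVM):

        import java.lang.management.ManagementFactory;
        import javax.management.MBeanServer;
        import javax.management.ObjectName;

        public class ListDebeziumMBeans {
            public static void main(String[] args) throws Exception {
                MBeanServer server = ManagementFactory.getPlatformMBeanServer();
                // Print every MBean registered under a debezium.* domain.
                for (ObjectName name : server.queryNames(new ObjectName("debezium.*:*"), null)) {
                    System.out.println(name);
                }
            }
        }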

    Debezium previously provided support for reading and storing offsets, history, and other bits as a part of the debezium-core module. With this release, we’ve introduced a new module called debezium-storage with implementations for local file and Kafka based storage approaches (DBZ-5229). This approach provides a wonderful extension point going forward to introduce other storage implementations as the need arises. When upgrading, you may need to adjust your application’s dependencies depending on what storage module implementations your code depends upon.
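
    Embedded-engine users keep configuring storage through properties; what may change is which storage artifact (file- or Kafka-based) has to be on the application classpath. A sketch using the standard Kafka Connect file-based offset store (file path illustrative):

        import java.util.Properties;

        public class EmbeddedStorageExample {
            public static Properties engineProps() {
                Properties props = new Properties();
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/debezium-offsets.dat");
                return props;
            }
        }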

    New features

    • Pluggable topic selector DBZ-4180

    • Read Debezium Metrics From Debezium Server Consumer DBZ-5235

    • Treat SQLServerException with "Broken pipe (Write failed)" exception message as a retriable exception DBZ-5292

    • Include user that committed change in metadata (oracle) DBZ-5358

    • UI Add debezium-ui i18n zh translation DBZ-5379

    • Support storing extended attributes in relational model and JSON schema history topic DBZ-5396

    • Validate topic naming strategy relative topic name properties DBZ-5414

    • Verify whether the unique index includes a function or arbitrary expression DBZ-5424

    • Remove the duplicated SimpleDdlParserListener from mysql connector DBZ-5425

    Fixes

    • MongoConnector’s field exclusion configuration does not work with fields with the same name but from different collections DBZ-4846

    • User input are not consistent on Filter step for the DBZ connectors DBZ-5246

    • KafkaDatabaseHistory does not check the database history topic creation result, causing UnknownTopicOrPartitionException DBZ-5249

    • Lob type data is inconsistent between source and sink, after modifying the primary key DBZ-5295

    • Caused by: java.io.EOFException: Failed to read next byte from position 2005308603 DBZ-5333

    • Incremental Snapshot: Oracle table name parsing does not support periods in DB name DBZ-5336

    • Support PostgreSQL default value function calls with schema prefixes DBZ-5340

    • Unsigned tinyint conversion fails for MySQL 8.x DBZ-5343

    • Log a warning when an unsupported LogMiner operation is detected for a captured table DBZ-5351

    • NullPointerException thrown when unique index based on both system and non-system generated columns DBZ-5356

    • MySQL Connector column hash v2 does not work DBZ-5366

    • Outbox JSON expansion fails when nested arrays contain no elements DBZ-5367

    • docker-maven-plugin needs to be upgraded for Mac Apple M1 DBZ-5369

    • AWS DocumentDB (with MongoDB Compatibility) Connect Fail DBZ-5371

    • Oracle Xstream does not propagate commit timestamp to transaction metadata DBZ-5373

    • UI View connector config in non-first cluster return 404 DBZ-5378

    • CommitScn not logged in expected format DBZ-5381

    • org.postgresql.util.PSQLException: Bad value for type timestamp/date/time: CURRENT_TIMESTAMP DBZ-5384

    • Missing "previousId" property when parsing the rename statement in the Kafka history topic DBZ-5386

    • Check constraint introduces a column based on constraint in the schema change event. DBZ-5390

    • The column is referenced as PRIMARY KEY, but a matching column is not defined in table DBZ-5398

    • Clarify which database name to use for signal.data.collection when using Oracle with pluggable database support DBZ-5399

    • Timestamp with time zone column’s default values not in GMT DBZ-5403

    • Upgrade to Kafka 3.1 broke build compatibility with Kafka 2.x and Kafka 3.0 DBZ-5404

    • PostgresConnectorIT#shouldRecoverFromRetriableException fails randomly DBZ-5408

    Other changes

    • Clean-up unused documentation variables DBZ-2595

    • Intermittent test failures on CI: EventProcessingFailureHandlingIT DBZ-4004

    • Clarify whether SQL Server on Azure is a supported configuration or not DBZ-4312

    • Remove redundant setting of last events DBZ-5047

    • Rename docker-images repository and JIRA component to container-images DBZ-5048

    • Update instructions for deploying Debezium on RHEL (downstream-only change) DBZ-5293

    • Add ts_ms field to examples of transaction boundary events and examples and update property description in documentation DBZ-5334

    • Oracle GitHub actions workflow no longer runs tests on pushes DBZ-5349

    • Unify job names in jenkins system-tests DBZ-5392

    • Build stable branches for connector-specific repos DBZ-5409

    • Oracle non-cdb builds do not use the correct environment settings DBZ-5411

    • Update the topic naming strategy doc to all connectors DBZ-5413

    • Address User guide review comments for Oracle connector DBZ-5418

    • OracleSchemaMigrationIT fails on non-pluggable (non-CDB) databases DBZ-5419

    Release 2.0.0.Alpha3 (July 1st 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha3 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha3 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Mysql Commit Timestamp DBZ-5170

    • Include event scn in Oracle records DBZ-5225

    • Redis Store does not work with GCP Managed Redis DBZ-5268

    Fixes

    • Incorrect loading of LSN from offsets DBZ-3942

    • Database history recovery will retain old tables after they’ve been renamed DBZ-4451

    • Adding new table with incremental snapshots not working DBZ-4834

    • BigDecimal has mismatching scale value for given Decimal schema DBZ-4890

    • Debezium has never found starting LSN DBZ-5031

    • Data duplication problem using postgresql source on debezium server DBZ-5070

    • Cursor fetch is used for all results during connection DBZ-5084

    • Debezium connector fails at parsing select statement overrides when the table name has a space DBZ-5198

    • DDL statement couldn’t be parsed 2 - Oracle connector 1.9.3.Final DBZ-5230

    • Debezium server duplicates scripting jar files DBZ-5232

    • Cannot convert field type tinyint(1) unsigned to boolean DBZ-5236

    • Oracle unparsable ddl create table DBZ-5237

    • Postgres Incremental Snapshot on parent partitioned table not working DBZ-5240

    • Character set influencers are not properly parsed on default values DBZ-5241

    • Duplicate SCNs on Oracle RAC installations incorrectly processed DBZ-5245

    • NPE when using Debezium Embedded in Quarkus DBZ-5251

    • Oracle LogMiner may fail with an in-progress transaction in an archive log that has been deleted DBZ-5256

    • Order of source block table names in a rename schema change event is not deterministic DBZ-5257

    • Debezium fails to connect to replicaset if a node is down DBZ-5260

    • No changes to commit_scn when oracle-connector got new lob data DBZ-5266

    • Invalid date 'SEPTEMBER 31' DBZ-5267

    • database.history.store.only.captured.tables.ddl not suppressing logs DBZ-5270

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-5271

    • Deadlock during snapshot with Mongo connector DBZ-5272

    • Mysql parser is not able to handle variables in KILL command DBZ-5273

    • Debezium Server fails when connecting to Azure Event Hubs DBZ-5279

    • ORA-01086 savepoint never established raised when database history topic cannot be created or does not exist DBZ-5281

    • Enabling database.history.store.only.captured.tables.ddl does not restrict history topic records DBZ-5285

    Other changes

    • Add script SMT test case to OCP test suite DBZ-2581

    • Confusing example for schema change topic DBZ-4713

    • Update cache-invalidation example DBZ-4754

    • Switch from static yaml descriptors to dynamic objects DBZ-4830

    • Verify that snapshot deployments build and deploy javadocs DBZ-4875

    • DelayStrategy should accept Duration rather than long ms DBZ-4902

    • Use maven 3.8.4 version with enforcer plugin DBZ-5069

    • Add option for '*' wildcard usage in testsuite preparation Jenkins jobs DBZ-5190

    • Use the Maven wrapper in the Github and Jenkins workflows DBZ-5207

    • Improve performance of OracleConnectorIT shouldIgnoreAllTablesInExcludedSchemas test DBZ-5226

    • Document use of JAR artifact to build Debezium scripting SMT into Kafka Connect DBZ-5227

    • Create shared adoc fragments for specifying MBean name format in connector metrics sections DBZ-5233

    • Build Oracle connector by default without Maven profiles DBZ-5234

    • Remove reference to removed case insensitive option in Oracle README.md DBZ-5250

    • Several Oracle tests do not get database name from TestHelper DBZ-5258

    • Upgrade to Quarkus 2.10.0.Final DBZ-5259

    • Upgrade PostgreSQL driver to 42.4.0 DBZ-5261

    • Refactor ChangeEventQueue to better support n:1 threads DBZ-5277

    • Upgrade MongoDB driver to 4.6.1 DBZ-5287

    Release 2.0.0.Alpha2 (June 9th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.2.0 and has been tested with version 3.2.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium MySQL connector represented FLOAT datatype as FLOAT64. Now the correct type FLOAT32 is used (DBZ-3865).
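
    Consumers that read the raw Kafka Connect Struct may need a matching change. A sketch, assuming a hypothetical FLOAT column named weight in the after block:

        import org.apache.kafka.connect.data.Struct;

        public class FloatFieldExample {
            // Before 2.0.0.Alpha2 the value arrived as FLOAT64:
            //   double weight = after.getFloat64("weight");
            // With the corrected FLOAT32 mapping it is read as:
            public static float readWeight(Struct after) {
                return after.getFloat32("weight");
            }
        }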

    MongoDB connector no longer supports streaming from oplog. The connector fully switched to change streams streaming (DBZ-4951).

    All connectors now use multi-partitioned codebase. This has impact only on project developers (DBZ-5042).

    All deprecated configuration options were removed (DBZ-5045).

    New features

    • Provide a signal to stop the running incremental snapshot DBZ-4251

    • SQL Server - Fail connector when a user doesn’t have the right permission (CDCReader) DBZ-4346

    • Allow mongodb-connector to decode Binary payloads DBZ-4600

    • Add UI backend tests for SQL Server connector DBZ-4867

    • direct usage of debezium engine ignores ChangeConsumer.supportsTombstoneEvents DBZ-5052

    • Config the cache size property for ByLogicalTableRouter caches DBZ-5072

    • Introduce a new extension api for query debezium version DBZ-5092

    • Introduce a new field "ts_ms" to identify the process time for schema change event DBZ-5098

    • MongoDB Connector should use RawBsonDocument instead of Document DBZ-5113

    Fixes

    • Postgres existing publication is not updated with the new table DBZ-3921

    • Error and connector stops when DDL contains lateral DBZ-4780

    • Schema changes should flush SCN to offsets if there are no other active transactions DBZ-4782

    • Connector stops streaming after a re-balance DBZ-4792

    • MySQL connector incremental snapshot fails to parse the datetime column length when the connector sets "snapshot.fetch.size": 20000 DBZ-4939

    • [MySQL Debezium] DDL Parsing error - CREATE OR REPLACE TABLE DBZ-4958

    • InstanceAlreadyExistsException during MongoDb connector metrics registration DBZ-5011

    • DateTimeParseException: Text 'infinity' could not be parsed in Postgres connector DBZ-5014

    • PostgreSQL ENUM default values are missing from generated schema DBZ-5038

    • Debezium official documentation typo DBZ-5040

    • Fix inconsistent transaction id when handling transactional messages in Vitess connector DBZ-5063

    • 4 Connections per connector (postgres) DBZ-5074

    • Oracle documentation refers to archive_log_target rather than archive_lag_target DBZ-5076

    • 'ALTER TABLE mytable DROP FOREIGN KEY IF EXISTS mytable_fk' no viable alternative at input 'ALTER TABLE mytable DROP FOREIGN KEY IF' DBZ-5077

    • Oracle Logminer: records missed during switch from snapshot to streaming mode DBZ-5085

    • Interrupting a snapshot process can hang for some JDBC drivers DBZ-5087

    • Debezium fails to undo change event due to transaction id ending in ffffffff with LogMiner DBZ-5090

    • Table changes are not filled in schema changes from snapshot DBZ-5096

    • PostgreSQL connector does not retry on some errors when Postgres is taken offline DBZ-5097

    • Parsing zero day fails DBZ-5099

    • Cannot Set debezium.sink.kafka.producer.ssl.endpoint.identification.algorithm to empty value DBZ-5105

    • Debezium connector failed with create table statement DBZ-5108

    • Current version of surefire/failsafe skips tests on failure in BeforeAll DBZ-5112

    Other changes

    • Restructure documentation for custom converters DBZ-4588

    • Document xmin.fetch.interval.ms property for Postgres connector DBZ-4734

    • Update to Quarkus 2.9.2.Final DBZ-4806

    • Upgrade Oracle driver to 21.5.0.0 DBZ-4877

    • Execute Debezium UI build when core library is changed DBZ-4947

    • Remove unused Oracle connector code DBZ-4973

    • Links to cassandra 3 and 4 artifacts no longer work for Debezium 1.9+ DBZ-5055

    • Align Postgresql driver with Quarkus DBZ-5060

    • Outdated links in Javadoc documentation DBZ-5075

    • Rename "Mysql" to "MySql" in related MysqlFieldReader interface DBZ-5078

    • Create CI job for maven repository verification DBZ-5082

    • Remove database.server.id default value handler, no longer auto-generated. DBZ-5100

    • Upgrade Jackson Databind to 2.13.2.2 DBZ-5107

    • Switch to released version of Fixture5 extension in System testsuite DBZ-5114

    Release 2.0.0.Alpha1 (April 28th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.1.0 and has been tested with version 3.1.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.0.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.0.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.0.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Java 11 is required to run Debezium in any form: Kafka Connect plug-in, Debezium engine and Debezium Server (DBZ-4949).

    Maven 3.8.4 is required to build Debezium (DBZ-5064).

    PostgreSQL connector no longer supports old versions of protobuf decoding plug-in that do not have transaction and default value support (DBZ-703).

    PostgreSQL connector no longer supports the wal2json decoding plug-in (DBZ-4156). The pgoutput decoding plug-in is recommended as the replacement.
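
    Switching the logical decoding plug-in is a single connector property; a minimal sketch:

        import java.util.Properties;

        public class PostgresPluginExample {
            public static Properties connectorProps() {
                Properties props = new Properties();
                // Replace wal2json with the recommended pgoutput plug-in.
                props.setProperty("plugin.name", "pgoutput");
                return props;
            }
        }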

    Legacy implementation of MySQL connector was removed (DBZ-4950).

    Confluent Avro converters are no longer present in Debezium container images (DBZ-4952).

    JDBC legacy date/time properties support was removed from MySQL connector (DBZ-4965).

    New features

    • Implement Pub/Sub Lite change consumer DBZ-4450

    • Include Instant Client in Docker build for Oracle in Tutorial examples DBZ-1013

    • Add Google Pub/Sub emulator support DBZ-4491

    • Making Postgres PSQLException: This connection has been closed. retriable DBZ-4948

    • ORA-04030: out of process memory when trying to allocate 65568 bytes (Logminer LCR c,krvxrib:buffer) DBZ-4963

    • Should store event header timestamp in HistoryRecord DBZ-4998

    • DBZ-UI: In the Edit/Duplicate connector flow, make the access/secret key/password/Client Secret field editable. DBZ-5001

    • adjust LogMiner batch size based on comparison with currently used batch size DBZ-5005

    Fixes

    • Connector throws java.lang.ArrayIndexOutOfBoundsException DBZ-3848

    • Document that no relevant tables should be in the SYS or SYSTEM tablespaces. DBZ-4762

    • Getting java.sql.SQLException: ORA-01291: missing logfile while running with archive log only DBZ-4879

    • Debezium uses wrong LCR format for Oracle 12.1 DBZ-4932

    • Oracle duplicates on connector restart DBZ-4936

    • Oracle truncate causes exception DBZ-4953

    • NPE caused by io.debezium.connector.oracle.antlr.listener.ColumnDefinitionParserListener.resolveColumnDataType DBZ-4976

    • Oracle connector may throw NullPointerException when stopped after an unsuccessful startup DBZ-4978

    • NPE for non-table related DDLs DBZ-4979

    • CTE statements aren’t parsed by MySQL connector DBZ-4980

    • Missing SSL configuration option in the debezium mongodb connector UI DBZ-4981

    • Unsupported MySQL Charsets during Snapshotting for fields with custom converter DBZ-4983

    • Outbox Transform does not allow expanded payload with additional fields in the envelope DBZ-4989

    • Redis Sink - clientSetname is taking place before auth DBZ-4993

    • CLOB with single quotes causes parser exception DBZ-4994

    • Oracle DDL parser fails on references_clause with no column list DBZ-4996

    • Can’t use 'local' database through mongos DBZ-5003

    • Triggering Incremental Snapshot on MongoDB connector throws json parsing error DBZ-5015

    • Jenkins jobs fail to download debezium-bom DBZ-5017

    • Redis Sink - Check if client is not null before closing it DBZ-5019

    • Cassandra 3 handler does not process partition deletions correctly DBZ-5022

    • Keyspaces should be initialised in all schema change listeners on sessions startup. DBZ-5023

    • SQL Server in multi-partition mode fails if a new database is added to an existing configuration DBZ-5033

    • MySQL tests start before the MySQL DB container is running DBZ-5054

    • Debezium server configuration properties not rendered correctly DBZ-5058

    Other changes

    • Add integration test for Oracle database.url configurations DBZ-3318

    • Build Cassandra 3.x connector with Java 11 DBZ-4910

    • Add ignoreSnapshots build option to release pipeline DBZ-4957

    • Update Pulsar client version used by Debezium Server DBZ-4961

    • Intermittent failure of RedisStreamIT.testRedisConnectionRetry DBZ-4966

    • Add triggers for 2.x paths in Github CI DBZ-4971

    • Debezium raised an exception and the task was still running DBZ-4987

    • Nexus Staging Maven plugin is incompatible with OpenJDK 17 DBZ-5025

    • Duplicate definition of Maven plugins DBZ-5026

    • OracleOffsetContextTest should be scoped to LogMiner only DBZ-5028

    • Scope several new Oracle tests to LogMiner only DBZ-5029

    • Failure in jdk outreach jobs DBZ-5041

    • Update artifact server job listing script DBZ-5051

    • Add FAQ about ORA-01882 and Oracle 11 to documentation DBZ-5057

    • Upgrade to Quarkus 2.8.2.Final DBZ-5062

    \ No newline at end of file
    diff --git a/releases/2.1/index.html b/releases/2.1/index.html
    index 6aa623b2bc..1ad5331296 100644
    --- a/releases/2.1/index.html
    +++ b/releases/2.1/index.html
    @@ -1 +1 @@
    - Debezium Release Series 2.1

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.29
    MongoDB Database: 4.2, 4.4, 5.0, 6.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.5.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0, 21.3.0.0, 21.4.0.0, 21.5.0.0, 21.6.0.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.1.0.Beta1

    2022-12-16
    Automatic LSN flushing can be disabled for PostgreSQL; Transformation predicates can be used in Debezium Server; An SMT that allows calculating the partition number based on a set of configured fields

    2.1.0.Alpha2

    2022-11-30
    Cassandra connector can run on Debezium Server; Nats JetStream sink adapter; Debezium images rebased to Fedora 37; Debezium image size significantly reduced; Improved handling of varint and decimal types in Cassandra
    \ No newline at end of file
    diff --git a/releases/2.1/release-notes.html b/releases/2.1/release-notes.html
    index 8d1e77da84..976df83a80 100644
    --- a/releases/2.1/release-notes.html
    +++ b/releases/2.1/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 2.1

    Release Notes for Debezium 2.1

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.1.4.Final (April 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • java.lang.NullPointerException in MySQL connector with max.queue.size.in.bytes DBZ-6104

    • debezium-connector-mysql failed to parse several DDLs of 'CREATE TABLE' DBZ-6124

    • Zerofill property failed for different int types DBZ-6185

    • GRANT DELETE HISTORY couldn’t be parsed in mariadb DBZ-6186

    • ddl parse failed for key partition table DBZ-6188

    • Use CHARSET for alterByConvertCharset clause DBZ-6194

    • Data loss upon connector restart DBZ-6204

    • DDL statement couldn’t be parsed: no viable alternative at input 'GRANT APPLICATION_PASSWORD_ADMIN' DBZ-6211

    • ParsingException: DDL statement couldn’t be parsed DBZ-6217

    • The CHARACTER/CHARACTER(p)/CHARACTER VARYING(p) data types not recognized as JDBC type CHAR DBZ-6221

    • MySQL singleDeleteStatement parser does not support table alias DBZ-6243

    • Missing GEOMETRY keyword which can be used as column name DBZ-6250

    • MariaDB’s UUID column type cannot be parsed when the schema is loaded DBZ-6255

    • Multiplatform build of example-postgres fails DBZ-6258

    • Add protoc version property to postgres connector pom.xml DBZ-6261

    • Table names with spaces are not correctly deserialized when using an Infinispan cache as the transaction buffer DBZ-6273

    • Transaction buffer state can become corrupted when using Infinispan cache with LOBs DBZ-6275

    Other changes

    • Update connector configuration examples in deployment instructions DBZ-6153

    • Insert missing Nebel annotations for Oracle connector FAQ topic DBZ-6215

    • Add metadata for MongoDB change streams topic DBZ-6223

    • Deprecate MongoDb 4.0 DBZ-6246

    • Fix broken link to Streams documentation in shared deployment files DBZ-6263

    • Update config example in Installing Debezium on OpenShift DBZ-6267

    • Address review feedback in downstream RHEL and OCP installation guides DBZ-6272

    • Infinispan cache configuration used by Oracle tests is not compatible with Infinispan 14.0.2 DBZ-6274

    • Upgrade MySQL JDBC driver to 8.0.32 DBZ-6304

    Release 2.1.3.Final (March 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The SSN field from the Oracle connector was propagated as INT32 in the source info block. This could lead to overflows on certain installations, so the field is now propagated as INT64 (DBZ-6091).
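
    For illustration: INT32 values top out at 2,147,483,647, so an ssn value beyond that limit could previously overflow, whereas as an INT64 the same source-block field can safely grow past it. The concrete limit is the only fact assumed here; the change itself is described above.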

    Debezium was truncating the milli/microsecond trailing zeroes of time-zoned types regardless of the schema width setting. This is no longer the case, and Debezium now provides the correct number of trailing zeroes (DBZ-6163).

    New features

    • Reduce verbosity of skipped transactions if transaction has no events relevant to captured tables DBZ-6094

    • Support String type for key in Mongo incremental snapshot DBZ-6116

    • Add hostname validator to connector configuration DBZ-6156

    Fixes

    • ActivateTracingSpan wrong timestamps reported DBZ-5827

    • Postgresql Data Loss on restarts DBZ-5915

    • debezium-connector-cassandra 2.1.0.Alpha2 plugin can no longer run "out of the box" DBZ-5925

    • MongoDB Incremental Snapshot not Working DBZ-5973

    • Mask password in log statement DBZ-6064

    • Loading Custom offset storage fails with Class not found error DBZ-6075

    • SQL Server tasks fail if the number of databases is smaller than maxTasks DBZ-6084

    • GCP Spanner connector start failing when there are multiple indexes on a single column DBZ-6101

    • When using LOB support, an UPDATE against multiple rows can lead to inconsistent event data DBZ-6107

    • Negative remaining attempts on MongoDB reconnect case DBZ-6113

    • Tables with spaces or non-ASCII characters in their name are not captured by Oracle because they must be quoted. DBZ-6120

    • Offsets are not advanced in a CDB deployment with low frequency of changes to PDB DBZ-6125

    • Oracle TIMESTAMP WITH TIME ZONE is emitted as GMT during snapshot rather than the specified TZ DBZ-6143

    • Config options internal.schema.history.internal.ddl.filter not working DBZ-6190

    Other changes

    • Prepare MongoDB ExtractNewDocumentState SMT doc for downstream GA DBZ-6006

    • Refactor OCP deployment job DBZ-6044

    • Refactor ARO deployment job DBZ-6045

    • Invalid links breaking downstream documentation build DBZ-6069

    • Remove references to adding configuration settings to a .properties file DBZ-6130

    • Upgrade Quarkus dependencies to 2.16.3.Final DBZ-6150

    • Disable advance slot checking DBZ-6191

    Release 2.1.1.Final (December 22nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    Other changes

    There are no other changes in this release.

    Release 2.1.2.Final (January 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    ZonedTimestamp strings were sent with fractional-second trailing zeroes removed. The current behaviour is to provide the trailing zeroes, padded to the length/scale of the source column (DBZ-5996).
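
    For illustration (the values are hypothetical): a value read from a TIMESTAMP(4) WITH TIME ZONE column that was previously emitted as 2023-01-26T10:15:30.4Z is now emitted as 2023-01-26T10:15:30.4000Z, padded to the column’s declared fractional-second precision.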

    New features

    • Remove redundant modifiers of members for interface fields DBZ-2439

    • Update the DBZ-UI documentation page to incorporate the recently added "Custom properties" step details DBZ-5878

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014
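
    A minimal connector-configuration sketch for the LSN-check behaviour listed above (DBZ-6012); event.processing.failure.handling.mode is an existing PostgreSQL connector option, and the choice of warn here is only an example:

        connector.class=io.debezium.connector.postgresql.PostgresConnector
        event.processing.failure.handling.mode=warn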

    Fixes

    • Data type conversion failed for mysql bigint DBZ-5798

    • Oracle cannot undo change DBZ-5907

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Run PostgresConnectorIT.shouldReceiveChangesForChangeColumnDefault() failed DBZ-6002

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    Other changes

    • Plug-in version information duplicated DBZ-4669

    • Remove incubating documentation text for MongoDB ExtractNewDocumentState SMT DBZ-5975

    • Upgrade Apicurio to 2.4.1.Final DBZ-5977

    • Upgrade JDBC driver to 42.5.1 DBZ-5980

    • Migrate connector triggers to gitlab DBZ-5992

    • SQL Server IncrementalSnapshotWithRecompileIT fails randomly DBZ-6035

    Release 2.1.0.Final (December 22nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The MongoDB Debezium connector required streaming from the primary node in the cluster. This is no longer necessary, and the connector now prefers reading from a non-primary node (DBZ-4339).

    The Vitess Debezium connector now supports snapshotting. This means that when a new connector starts, the existing content will be snapshotted by default (DBZ-5930).
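
    To keep the previous behaviour (streaming only, with no initial snapshot), the snapshot can be switched off in the connector configuration. A minimal sketch, assuming the Vitess connector follows the common Debezium snapshot.mode convention; consult the connector documentation for the exact supported values:

        connector.class=io.debezium.connector.vitess.VitessConnector
        snapshot.mode=never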

    New features

    • Implement support for JSON_TABLE in MySQL parser DBZ-3575

    • Provide Debezium Spanner connector DBZ-5937

    • Print the readable data class name in JdbcValueConverters.handleUnknownData DBZ-5946

    Fixes

    • Cannot expand JSON payload with nested arrays of objects DBZ-5344

    • field.exclude.list in MongoDB Connector v2.0 doesn’t accept * as a wildcard for collectionName DBZ-5818

    • Debezium UI documentation link is not accessible to the user via documentation side navigation menu. DBZ-5900

    • Toasted json/int/bigint arrays are not properly processed DBZ-5936

    • No table filters found for filtered publication DBZ-5949

    Other changes

    There are no other changes in this release.

    Release 2.1.0.Beta1 (December 16th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Cassandra range tombstone information was a plain string. Now a logical object with parsed data and types is provided (DBZ-5912).

    The Cassandra TimeUUID datatype was propagated as a binary object but documented as a string. This was incorrect, and it is now propagated as a string (DBZ-5923).

    New features

    • Postgres: Disable LSN confirmation to database DBZ-5811

    • Realize data distribution according to specified fields DBZ-5847

    • Support predicate parameters in Debezium Server DBZ-5940

    • Use the Patternfly database icon as a placeholder for Oracle Database DBZ-5941
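
    A minimal Debezium Server application.properties sketch combining two of the items listed above: disabling LSN confirmation for PostgreSQL (DBZ-5811) and attaching a predicate to a transformation (DBZ-5940). Property names follow the Debezium Server convention of prefixing connector options with debezium.source.; the predicate name and topic pattern are only examples:

        # do not confirm processed LSNs back to the PostgreSQL replication slot
        debezium.source.flush.lsn.source=false
        # drop records whose topic name matches the predicate
        debezium.transforms=filter
        debezium.transforms.filter.type=org.apache.kafka.connect.transforms.Filter
        debezium.transforms.filter.predicate=internalTopics
        debezium.predicates=internalTopics
        debezium.predicates.internalTopics.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
        debezium.predicates.internalTopics.pattern=.*internal.*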

    Fixes

    • Handle toasted String array DBZ-4941

    • Cassandra deletes log files on exit when real time processing is enabled DBZ-5776

    • ReplicationConnectionIT test fails DBZ-5800

    • MongoDB docs for incremental snapshots is SQL specific DBZ-5804

    • Conflicting documentation for snapshot.mode property in MongoDB connector v2.0 DBZ-5812

    • IllegalStateException is thrown if task is recovering while other tasks are running DBZ-5855

    • Negative decimal number scale is not supported by Avro DBZ-5880

    • Connector deployment instructions provide incorrect Maven path for Debezium scripting component DBZ-5882

    • Incorrect Streams Kafka version in connector deployment instructions for creating a custom image DBZ-5883

    • Run postgres connector RecordsStreamProducerIT failed DBZ-5895

    • Support INSERT INTO statements with dots in column names DBZ-5904

    • Incorrect default value for additional-condition docs DBZ-5906

    • ConnectorLifecycle is not logging anymore the exception stacktrace when startup fails DBZ-5908

    • Debezium Server stops with NPE when Redis does not report the "maxmemory" field in "info memory" command DBZ-5911

    • PostgresConnectorIT#shouldAckLsnOnSourceByDefault and #shouldNotAckLsnOnSource fails DBZ-5914

    • SQL Server connector database.instance config option is ignored DBZ-5924

    • Wrong java version in Installing Debezium documentation DBZ-5928

    • Toasted varchar array is not correctly processed DBZ-5944

    Other changes

    • Use static import for Assertions in all tests DBZ-2432

    • Test window function in MySQL parser DBZ-3576

    • Run test against Apicurio registry DBZ-5838

    • Add tests against multinode RS and (ideally) sharded cluster DBZ-5857

    • Update documentation for Debezium Server with Cassandra Connector DBZ-5885

    • Allow CI deploy clusters to PSI DBZ-5887

    • MariaDB and MySQL have different syntax DBZ-5888

    • Execute IT tests in alphabetical order DBZ-5889

    • Migrate debezium-server-nats-jetstream to AssertJ DBZ-5901

    • Reduce jenkins jobs footprint DBZ-5905

    • Move Debezium Cassandra connector out from incubation DBZ-5922

    • Clean up "doSnapshot" config code DBZ-5931

    • Version badge on README in Cassandra connector is stuck DBZ-5932

    • Make startup of Cassandra container faster DBZ-5933

    • Fix logging for tests for Cassandra connector DBZ-5934

    Release 2.1.0.Alpha2 (November 30th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Debezium REST extension was not deployed in the Debezium container and had to be added by the user. This is no longer necessary, as the REST extension is now included (DBZ-4303).

    Debezium images were upgraded to use Fedora 37 (DBZ-5461).

    The PostgreSQL connector could resume streaming from a re-created replication slot even if the slot no longer contained data that the connector had not yet seen. The result could be silent data loss. The connector now checks whether the resume point is present and fails to start if it is not (DBZ-5739).

    New features

    • Expose Cassandra Connector via Debezium Server DBZ-2098

    • Validate Debezium Server configuration properties DBZ-4720

    • Enable pass-thru of additional config options in Debezium UI DBZ-5324

    • Sink adapter for Nats JetStream DBZ-5772

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Reduce container image sizes by consolidating operations per layer DBZ-5864

    • Typo error in Oracle connector documentation 2.0 DBZ-5877
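
    A minimal Debezium Server sketch for the new NATS JetStream sink listed above (DBZ-5772). The property names follow the Server's usual debezium.sink.<type>.* convention and the URL is a placeholder; see the Debezium Server documentation for the full set of options:

        debezium.sink.type=nats-jetstream
        debezium.sink.nats-jetstream.url=nats://localhost:4222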

    Fixes

    • Embedded Engine or Server retrying indefinitely on all types of retriable errors DBZ-5661

    • PostgreSQL missing metadata info DBZ-5789

    • For outbox transformation, when 'table.expand.json.payload' is set to true null values are not correctly deserialized DBZ-5796

    • Cassandra decimal values are not deserialized using Debezium Cassandra Connector DBZ-5807

    • Cassandra varint type is currently not supported DBZ-5808

    • 'topic.prefix' default value in MongoDB connector v2.0 DBZ-5817

    • Quarkus outbox extension never finishes the open tracing span DBZ-5821

    • fix names of range fields in schema to comply with Avro standard DBZ-5826

    • ExtractNewDocumentState does not support updateDescription.updatedFields field DBZ-5834

    • CREATE/ALTER user does not support COMMENT token DBZ-5836

    • Invalid Java object for schema with type FLOAT64: class java.lang.Float DBZ-5843

    • Message contents might not get logged in case of error DBZ-5874

    • CREATE/ALTER user does not support ATTRIBUTE token DBZ-5876

    Other changes

    • SQL table rename affect on Kafka connector and topic DBZ-5423

    • Create RHAF version of Debezium docs DBZ-5729

    • Add Debezium doc section to RHAF DBZ-5730

    • Create new Debezium section in the docs. DBZ-5731

    • Add Debezium docs to DDF DBZ-5732

    • Create ARO provisioning job DBZ-5742

    • Amend Confluent Avro converter installation documentation DBZ-5762

    • Modify ocp system tests to archive test results and logs DBZ-5785

    • GitHub Actions: Deprecating save-state and set-output commands DBZ-5824

    • Change logging levels of several schema change handler log entries DBZ-5833

    • Revert running tests against Apicurio registry DBZ-5839

    • Add Kubernetes plugin to Jenkins DBZ-5844

    • OracleConnectorIT shouldIgnoreAllTablesInExcludedSchemas test may randomly fail DBZ-5850

    • Upgrade wildfly-elytron to 1.15.5 / 1.16.1 due to CVE-2021-3642 DBZ-5854

    • Upgrade PostgreSQL example images to Postgres 15 DBZ-5860

    • GitHub Actions deprecation of Node 12 - actions/checkout DBZ-5870

    Release 2.1.0.Alpha1 (November 10th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Alpha1 plugin files, and restart the connector using the same (when upgrading from the same major version) or updated (when upgrading from an older major version) configuration. Upon restart, the 2.1.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support for Postgres 15 DBZ-5370

    • Add support for SMT predicates in Debezium Engine DBZ-5530

    • MySQL Connector capture TRUNCATE command as message in table topic DBZ-5610

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Expose heartbeatFrequency setting for mongodb connector DBZ-5736

    • Provide Redis storage as store module DBZ-5749

    • Redis Sink wait for Redis Replica writes DBZ-5752

    • Redis sink back-pressure mechanism when Redis memory is almost full DBZ-5782

    • Enhance the ability to sanitize topic name DBZ-5790
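
    A sketch of an embedded-engine configuration touching two of the items listed above: SMT predicates honoured by the Debezium Engine (DBZ-5530) and MySQL TRUNCATE statements captured as messages (DBZ-5610). These are the same keys Kafka Connect uses; the transform and predicate names are only examples:

        connector.class=io.debezium.connector.mysql.MySqlConnector
        # emit TRUNCATE events as messages (truncate operations are skipped by default)
        skipped.operations=none
        # SMT predicates are now applied by the embedded engine as well
        transforms=dropTombstones
        transforms.dropTombstones.type=org.apache.kafka.connect.transforms.Filter
        transforms.dropTombstones.predicate=isTombstone
        predicates=isTombstone
        predicates.isTombstone.type=org.apache.kafka.connect.transforms.predicates.RecordIsTombstone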

    Fixes

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • ORA-01003: no statement parsed DBZ-5352

    • Missing snapshot pending transactions DBZ-5482

    • Db2 documentation refers to invalid SMALLMONEY and MONEY data types DBZ-5504

    • Using snapshot.mode ALWAYS uses SCN from offsets DBZ-5626

    • MongoDB multiple tasks monitor misalignment DBZ-5629

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • Oracle SQL parsing error when collation used DBZ-5726

    • Columns are not excluded when doing incremental snapshots DBZ-5727

    • Unparseable DDL statement DBZ-5734

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    • Hostname not available for load balanced ocp services in ARO DBZ-5753

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • More Oracle logging DBZ-5759

    • Oracle should only log row contents at TRACE level DBZ-5760

    • Update system test artifact preparation to reflect naming changes in downstream DBZ-5767

    • Outbox Router documentation outdated regarding value converter DBZ-5770

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • Suppress logging of undetermined optionality for explicitly excluded columns DBZ-5783

    • Oracle connector does not attempt restart when ORA-01089 exception is nested DBZ-5791

    • Message with LSN 'LSN{XYZ}' not present among LSNs seen in the location phase DBZ-5792

    • The merge method of configuration does not work DBZ-5801

    • Mysql connector alter table with database name parse failed DBZ-5802

    Other changes

    • Execute tests with Apicurio converters DBZ-2131

    • Revision info missing on website DBZ-5083

    • Debezium on ARO sanity testing DBZ-5647

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Remove downstream TP designation for RAC content in Oracle connector docs DBZ-5735

    • Update Pulsar client to 2.10.1 DBZ-5737

    • Parametrize Strimzi operator name to enable multiple testsuites running on same cluster DBZ-5744

    • Enable CI to report results to ReportPortal instance DBZ-5745

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    • Testsuite unable to connect to SQLServer due to encryption DBZ-5763

    • Testsuite uses incorrect jdbc driver class for SQLServer with docker DBZ-5764

    • Upgrade com.jayway.jsonpath:json-path DBZ-5766

    • Product profile is not used when running Oracle matrix against downstream DBZ-5768

    • Upgrade to Quarkus 2.14.CR1 DBZ-5774

    • Switch from Fest to AssertJ DBZ-5779

    • Upgrade postgres driver to version 42.5.0 DBZ-5780

    • Upgrade to Quarkus 2.14.0.Final DBZ-5786

    • Doc Typo in cloudevents DBZ-5788

    • Fix DB2 reporting script path DBZ-5799

    • Add ORA-01555 to Oracle documentation DBZ-5816

    • Change visibility of BaseSourceTask#logStatistics method to protected DBZ-5822

    • Upgrade Postgres images to Debian 11 DBZ-5823

    \ No newline at end of file + Release Notes for Debezium 2.1

    Release Notes for Debezium 2.1

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.1.4.Final (April 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • java.lang.NullPointerException in MySQL connector with max.queue.size.in.bytes DBZ-6104

    • debezium-connector-mysql failed to parse serveral DDLs of 'CREATE TABLE' DBZ-6124

    • Zerofill property failed for different int types DBZ-6185

    • GRANT DELETE HISTORY couldn’t be parsed in mariadb DBZ-6186

    • ddl parse failed for key partition table DBZ-6188

    • Use CHARSET for alterByConvertCharset clause DBZ-6194

    • Data loss upon connector restart DBZ-6204

    • DDL statement couldn’t be parsed: no viable alternative at input 'GRANT APPLICATION_PASSWORD_ADMIN' DBZ-6211

    • ParsingException: DDL statement couldn’t be parsed DBZ-6217

    • The CHARACTER/CHARACTER(p)/CHARACTER VARYING(p) data types not recognized as JDBC type CHAR DBZ-6221

    • MySQL singleDeleteStatement parser does not support table alias DBZ-6243

    • Missing GEOMETRY keyword which can be used as column name DBZ-6250

    • MariaDB’s UUID column type cannot be parsed when scheme is loaded DBZ-6255

    • Multiplatform build of example-postres fails DBZ-6258

    • Add protoc version property to postgres connector pom.xml DBZ-6261

    • Table names with spaces are not correctly deserialized when using an Infinispan cache as the transaction buffer DBZ-6273

    • Transaction buffer state can become corrupted when using Infinispan cache with LOBs DBZ-6275

    Other changes

    • Update connector configuration examples in deployment instructions DBZ-6153

    • Insert missing Nebel annotations for Oracle connector FAQ topic DBZ-6215

    • Add metadata for MongoDB change streams topic DBZ-6223

    • Deprecate MongoDb 4.0 DBZ-6246

    • Fix broken link to Streams documentation in shared deployment files DBZ-6263

    • Update config example in Installing Debezium on OpenShift DBZ-6267

    • Address review feedback in downstream RHEL and OCP installation guides DBZ-6272

    • Infinispan cache configuration used by Oracle tests are not compatible with Infinispan 14.0.2 DBZ-6274

    • Upgrade MySQL JDBC driver to 8.0.32 DBZ-6304

    Release 2.1.3.Final (March 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    SSN field from Oracle connector was propagated as INT32 in the source info block. This could lead to overflows on certain installations so the field is now propagated as INT64 (DBZ-6091).

    Debezium was truncating on timezoned types milli/microsecond zeroes regardless of the schema width setting. This is no longer the case and Debezium provides the correct number of trailing zeroes (DBZ-6163).

    New features

    • Reduce verbosity of skipped transactions if transaction has no events relevant to captured tables DBZ-6094

    • Support String type for key in Mongo incremental snapshot DBZ-6116

    • Add hostname validator to connector configuration DBZ-6156

    Fixes

    • ActivateTracingSpan wrong timestamps reported DBZ-5827

    • Postgresql Data Loss on restarts DBZ-5915

    • debezium-connector-cassandra 2.1.0.Alpha2 plugin can no longer run "out of the box" DBZ-5925

    • MongoDB Incremental Snapshot not Working DBZ-5973

    • Mask password in log statement DBZ-6064

    • Loading Custom offset storage fails with Class not found error DBZ-6075

    • SQL Server tasks fail if the number of databases is smaller than maxTasks DBZ-6084

    • GCP Spanner connector start failing when there are multiple indexes on a single column DBZ-6101

    • When using LOB support, an UPDATE against multiple rows can lead to inconsistent event data DBZ-6107

    • Negative remaining attempts on MongoDB reconnect case DBZ-6113

    • Tables with spaces or non-ASCII characters in their name are not captured by Oracle because they must be quoted. DBZ-6120

    • Offsets are not advanced in a CDB deployment with low frequency of changes to PDB DBZ-6125

    • Oracle TIMESTAMP WITH TIME ZONE is emitted as GMT during snapshot rather than the specified TZ DBZ-6143

    • Config options internal.schema.history.internal.ddl.filter not working DBZ-6190

    Other changes

    • Prepare MongoDB ExtractNewDocumentState SMT doc for downstream GA DBZ-6006

    • Refactor OCP deployment job DBZ-6044

    • Refactor ARO deployment job DBZ-6045

    • Invalid links breaking downstream documentation build DBZ-6069

    • Remove references to adding configuration settings to a .properties file DBZ-6130

    • Upgrade Quarkus dependencies to 2.16.3.Final DBZ-6150

    • Disable advance slot checking DBZ-6191

    Release 2.1.1.Final (December 22nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    Other changes

    There are no other changes in this release.

    Release 2.1.2.Final (January 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    ZonedTimestamp strings were sent with fractional second trailing zeroes removed. Current behaviour is to provide the trailing zeroes padded to the length/scale of the source column (DBZ-5996).

    New features

    • Remove redundant modifiers of members for interface fields DBZ-2439

    • Update the DBZ-UI documentation page to incorporate the recently added "Custom properties" step details DBZ-5878

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014

    Fixes

    • Data type conversion failed for mysql bigint DBZ-5798

    • Oracle cannot undo change DBZ-5907

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Run PostgresConnectorIT.shouldReceiveChangesForChangeColumnDefault() failed DBZ-6002

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    Other changes

    • Plug-in version information duplicated DBZ-4669

    • Remove incubating documentation text for MongoDB ExtractNewDocumentState SMT DBZ-5975

    • Upgrade Apicurio to 2.4.1.Final DBZ-5977

    • Upgrade JDBC driver to 42.5.1 DBZ-5980

    • Migrate connector triggers to gitlab DBZ-5992

    • SQL Server IncrementalSnapshotWithRecompileIT fails randomly DBZ-6035

    Release 2.1.0.Final (December 22nd 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    MongoDB Debezium connector required streaming from primary node in the cluster. This is no longer necessary and the connector prefers reading from non-primary node (DBZ-4339).

    Vitess Debezium connector now supports snapshotting. This means that upon new connector start the existing content will be snapshotted by default (DBZ-5930).

    New features

    • Implement support for JSON_TABLE in MySQL parser DBZ-3575

    • Provide Debezium Spanner connector DBZ-5937

    • Print the readable data class name in JdbcValueConverters.handleUnknownData DBZ-5946

    Fixes

    • Cannot expand JSON payload with nested arrays of objects DBZ-5344

    • field.exclude.list in MongoDB Connector v2.0 doesn’t accept * as a wildcard for collectionName DBZ-5818

    • Debezium UI documentation link is not accessible to the user via documentation side navigation menu. DBZ-5900

    • Toasted json/int/bigint arrays are not properly processed DBZ-5936

    • No table filters found for filtered publication DBZ-5949

    Other changes

    There are no other changes in this release.

    Release 2.1.0.Beta1 (December 16th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Cassandra range tombstone information was a plain string. Now logical object with parsed data and types is provided (DBZ-5912).

    The Cassandra TimeUUID datatype was propadagated as binary object but documented as string. This was incorrect and it is now propagated as string (DBZ-5923).

    New features

    • Postgres: Disable LSN confirmation to database DBZ-5811

    • Realize data distribution according to specified fields DBZ-5847

    • Support predicate parameters in Debezium Server DBZ-5940

    • Use the Patternfly database icon as a placeholder for Oracle Database DBZ-5941

    Fixes

    • Handle toasted String array DBZ-4941

    • Cassandra deletes log files on exit when real time processing is enabled DBZ-5776

    • ReplicationConnectionIT test fails DBZ-5800

    • MongoDB docs for incremental snapshots is SQL specific DBZ-5804

    • Conflicting documentation for snapshot.mode property in MongoDB connector v2.0 DBZ-5812

    • IllegalStateException is thrown if task is recovering while other tasks are running DBZ-5855

    • Negative decimal number scale is not supported by Avro DBZ-5880

    • Connector deployment instructions provide incorrect Maven path for Debezium scripting component DBZ-5882

    • Incorrect Streams Kafka version in connector deployment instructions for creating a custom image DBZ-5883

    • Run postgres connector RecordsStreamProducerIT failed DBZ-5895

    • Suppport INSERT INTO statements with dots in column names DBZ-5904

    • Incorrect default value for additional-condition docs DBZ-5906

    • ConnectorLifecycle is not logging anymore the exception stacktrace when startup fails DBZ-5908

    • Debezium Server stops with NPE when Redis does not report the "maxmemory" field in "info memory" command DBZ-5911

    • PostgresConnectorIT#shouldAckLsnOnSourceByDefault and #shouldNotAckLsnOnSource fails DBZ-5914

    • SQL Server connector database.instance config option is ignored DBZ-5924

    • Wrong java version in Installing Debezium documentation DBZ-5928

    • Toasted varchar array is not correctly processed DBZ-5944

    Other changes

    • Use static import for Assertions in all tests DBZ-2432

    • Test window function in MySQL parser DBZ-3576

    • Run test against Apicurio registry DBZ-5838

    • Add tests against multinode RS and (ideally) sharded cluster DBZ-5857

    • Update documentation for Debezium Server with Cassandra Connector DBZ-5885

    • Allow CI deploy clusters to PSI DBZ-5887

    • Mariadb and Mysql have different syntax DBZ-5888

    • Execute IT tests in alphabetical order DBZ-5889

    • Migrate debezium-server-nats-jetstream to AssertJ DBZ-5901

    • Reduce jenkins jobs footprint DBZ-5905

    • Move Debezium Cassandra connector out from incubation DBZ-5922

    • Clean up "doSnapshot" config code DBZ-5931

    • Version badge on README in Cassandra connector is stuck DBZ-5932

    • Make startup of Cassandra container faster DBZ-5933

    • Fix logging for tests for Cassandra connector DBZ-5934

    Release 2.1.0.Alpha2 (November 30th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.1.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium REST extension was not deployed in Debezium container and has to be added by the user. This is no longer necessary as the REST expension is included (DBZ-4303).

    Debezium images were upgraded to use Fedora 37 (DBZ-5461).

    PostgreSQL connector could resume streaming from re-created replication slot even if it no longer contained data that connector has not seen. The result could be a silent data loss. Now the connector checks if the resume point is present and fails to start if it is not (DBZ-5739).

    New features

    • Expose Cassandra Connector via Debezium Server DBZ-2098

    • Validate Debezium Server configuration properties DBZ-4720

    • Enable pass-thru of additional config options in Debezium UI DBZ-5324

    • Sink adapter for Nats JetStream DBZ-5772

    • Replace obsolete DebeziumDownload attribute DBZ-5835

    • Reduce container image sizes by consolidating operations per layer DBZ-5864

    • Typo error in Oracle connector documentation 2.0 DBZ-5877

    Fixes

    • Embedded Engine or Server retrying indefinitely on all types of retriable errors DBZ-5661

    • PostgreSQL missing metadata info DBZ-5789

    • For outbox transformation, when 'table.expand.json.payload' is set to true null values are not correctly deserialized DBZ-5796

    • Cassandra decimal values are not deserialized using Debezium Cassandra Connector DBZ-5807

    • Cassandra varint type is currently not supported DBZ-5808

    • 'topic.prefix' default value in MongoDB connector v2.0 DBZ-5817

    • Quarkus outbox extention never finishes the open tracing span DBZ-5821

    • fix names of range fields in schema to comply with Avro standard DBZ-5826

    • ExtractNewDocumentState does not support updateDescription.updatedFields field DBZ-5834

    • CREATE/ALTER user does not support COMMENT token DBZ-5836

    • Invalid Java object for schema with type FLOAT64: class java.lang.Float DBZ-5843

    • Message contents might not get logged in case of error DBZ-5874

    • CREATE/ALTER user does not support ATTRIBUTE token DBZ-5876

    Other changes

    • SQL table rename affect on Kafka connector and topic DBZ-5423

    • Create RHAF version of Debezium docs DBZ-5729

    • Add Debezium doc section to RHAF DBZ-5730

    • Create new Debezium section in the docs. DBZ-5731

    • Add Debezium docs to DDF DBZ-5732

    • Create ARO provisioning job DBZ-5742

    • Amend Confluent Avro converter installation documentation DBZ-5762

    • Modify ocp system tests to archive test results and logs DBZ-5785

    • GitHub Actions: Deprecating save-state and set-output commands DBZ-5824

    • Change logging levels of several schema change handler log entries DBZ-5833

    • Revert running tests against Apicurio registry DBZ-5839

    • Add Kubernetes plugin to Jenkins DBZ-5844

    • OracleConnectorIT shouldIgnoreAllTablesInExcludedSchemas test may randomly fail DBZ-5850

    • Upgrade wildfly-elytron to 1.15.5 / 1.16.1 due to CVE-2021-3642 DBZ-5854

    • Upgrade PostgreSQL example images to Postgres 15 DBZ-5860

    • GitHub Actions deprecation of Node 12 - actions/checkout DBZ-5870

    Release 2.1.0.Alpha1 (November 10th 2022)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.1.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.1.0.Alpha1 plugin files, and restart the connector using the same (when upgrading from the same major version) or updated (when upgrading from an older major version) configuration. Upon restart, the 2.1.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support for Postgres 15 DBZ-5370

    • Add support for SMT predicates in Debezium Engine DBZ-5530

    • MySQL Connector capture TRUNCATE command as message in table topic DBZ-5610

    • Improve LogMiner query performance by reducing REGEXP_LIKE disjunctions DBZ-5648

    • Expose heartbeatFrequency setting for mongodb connector DBZ-5736

    • Provide Redis storage as store module DBZ-5749

    • Redis Sink wait for Redis Replica writes DBZ-5752

    • Redis sink back-pressure mechanism when Redis memory is almost full DBZ-5782

    • Enhance the ability to sanitize topic name DBZ-5790

    Fixes

    • Using snapshot boundary mode "all" causes DebeziumException on Oracle RAC DBZ-5302

    • ORA-01003: no statement parsed DBZ-5352

    • Missing snapshot pending transactions DBZ-5482

    • Db2 documentation refers to invalid SMALLMONEY and MONEY data types DBZ-5504

    • Using snapshot.mode ALWAYS uses SCN from offsets DBZ-5626

    • MongoDB multiple tasks monitor misalignment DBZ-5629

    • UNIQUE INDEX with NULL value throws exception when lob.enabled is true DBZ-5682

    • Oracle SQL parsing error when collation used DBZ-5726

    • Columns are not excluded when doing incremental snapshots DBZ-5727

    • Unparseable DDL statement DBZ-5734

    • NullPointerException thrown during snapshot of tables in Oracle source connector DBZ-5738

    • Remove note from snapshot metrics docs file that flags incremental snapshots as TP feature DBZ-5748

    • Hostname not available for load balanced ocp services in ARO DBZ-5753

    • Exclude Oracle Compression Advisor tables from capture to avoid infinite loop DBZ-5756

    • More Oracle logging DBZ-5759

    • Oracle should only log row contents at TRACE level DBZ-5760

    • Update system test artifact preparation to reflect naming changes in downstream DBZ-5767

    • Outbox Router documentation outdated regarding value converter DBZ-5770

    • Using DBMS_LOB.ERASE by itself can lead to an unexpected UPDATE with null BLOB value DBZ-5773

    • Suppress logging of undetermined optionality for explicitly excluded columns DBZ-5783

    • Oracle connector does not attempt restart when ORA-01089 exception is nested DBZ-5791

    • Message with LSN 'LSN{XYZ}' not present among LSNs seen in the location phase DBZ-5792

    • The merge method of configuration is not work DBZ-5801

    • MySQL connector ALTER TABLE with database name fails to parse DBZ-5802

    Other changes

    • Execute tests with Apicurio converters DBZ-2131

    • Revision info missing on website DBZ-5083

    • Debezium on ARO sanity testing DBZ-5647

    • SQL Server connector docs should mention multi-task support DBZ-5714

    • Remove downstream TP designation for RAC content in Oracle connector docs DBZ-5735

    • Update Pulsar client to 2.10.1 DBZ-5737

    • Parametrize Strimzi operator name to enable multiple testsuites running on same cluster DBZ-5744

    • Enable CI to report results to ReportPortal instance DBZ-5745

    • Debezium connectors ship with an old version of google-protobuf vulnerable to CVE-2022-3171 DBZ-5747

    • Testsuite unable to connect to SQLServer due to encryption DBZ-5763

    • Testsuite uses incorrect jdbc driver class for SQLServer with docker DBZ-5764

    • Upgrade com.jayway.jsonpath:json-path DBZ-5766

    • Product profile is not used when running Oracle matrix against downstream DBZ-5768

    • Upgrade to Quarkus 2.14.CR1 DBZ-5774

    • Switch from Fest to AssertJ DBZ-5779

    • Upgrade postgres driver to version 42.5.0 DBZ-5780

    • Upgrade to Quarkus 2.14.0.Final DBZ-5786

    • Doc Typo in cloudevents DBZ-5788

    • Fix DB2 reporting script path DBZ-5799

    • Add ORA-01555 to Oracle documentation DBZ-5816

    • Change visibility of BaseSourceTask#logStatistics method to protected DBZ-5822

    • Upgrade Postgres images to Debian 11 DBZ-5823

    \ No newline at end of file diff --git a/releases/2.2/index.html b/releases/2.2/index.html index 68ff14355a..134117c7b5 100644 --- a/releases/2.2/index.html +++ b/releases/2.2/index.html @@ -1 +1 @@ - Debezium Release Series 2.2

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.32
    MongoDB Database: 4.2, 4.4, 5.0, 6.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.5.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0, 21.3.0.0, 21.4.0.0, 21.5.0.0, 21.6.0.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.2.0.CR1

    2023-04-14
    Upgrade to Quarkus 3.x; Pulsar sink performance boost; Incubating support for MongoDB incremental snapshots on multi-replica-set and sharded deployments; Jolokia added to the Connect image; Deprecating Docker Hub registry

    2.2.0.Alpha3

    2023-03-08
    New Cassandra connector for Cassandra Enterprise; Initial snapshots can snapshot tables in parallel (experimental); All Debezium Server sinks support headers; Server-side database/collection and data filtering for MongoDB
    \ No newline at end of file diff --git a/releases/2.2/release-notes.html b/releases/2.2/release-notes.html index bd1a01a1e2..71fedc5ace 100644 --- a/releases/2.2/release-notes.html +++ b/releases/2.2/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 2.2

    Release Notes for Debezium 2.2

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.2.1.Final (May 12th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    Fixes

    • Toasted varying character array and date array are not correctly processed DBZ-6122

    • Using pg_replication_slot_advance, which is not supported by PostgreSQL 10 DBZ-6353

    • 'CREATE TABLE t (c NATIONAL CHAR)' parsing failed DBZ-6357

    • Toasted hstore values are not correctly processed DBZ-6379

    • Snapshotting does not work for hstore in Map mode DBZ-6384

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • MySQL in debezium-parser-ddl: the inserted SQL statement reports an error DBZ-6401

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • New SMT HeaderToValue not working DBZ-6411

    • Debezium Server 2.2.0.Final BOM refers to debezium-build-parent 2.2.0-SNAPSHOT DBZ-6437

    • Oracle Connector failed parsing DDL Statement DBZ-6442

    • Oracle DDL shrink space for index partition can not be parsed DBZ-6446

    • Fix existing bug in information schema query in the Spanner connector DBZ-6385

    • Change logging level of skip.messages.without.change DBZ-6391

    • Include redo/archive log metadata on ORA-01291 exceptions DBZ-6436

    Other changes

    • Base the "replaceable" build numbers in legacy deployment instructions on debezium-build-number attribute DBZ-6371

    Release 2.2.0.Final (April 20th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Describe Postgres example configuration for Debezium Server DBZ-6325

    • Improve messages in Redis sink in case of OOM DBZ-6346

    • Stream shard list for debezium vitess connector DBZ-6356

    Fixes

    • If column.include.list/column.exclude.list are used and the target table receives an update for the excluded (or not included) column, such events should be ignored DBZ-2979

    • Connector offsets do not advance on transaction commit with filtered events when LOB enabled DBZ-5395

    • Task failure when index is made on primary columns of table. DBZ-6238

    • Oracle connector doesn’t need to verify redo log when snapshotting only DBZ-6276

    • MySQL connector cannot parse table with SYSTEM VERSIONING DBZ-6331

    • MySQL in debezium-parser-ddl does not support WITH keyword parsing DBZ-6336

    • Duplicate JMX MBean names when multiple vitess tasks running in the same JVM DBZ-6347

    • KafkaSignalThread#SIGNAL_POLL_TIMEOUT_MS option duplicate signal prefix DBZ-6361

    Other changes

    • Complete MongoDB incremental snapshotting implementation DBZ-4427

    • Add documentation for the reactive variant of the Quarkus outbox extension DBZ-5859

    • Create an annotation for flaky tests DBZ-6324

    • 2.1.4 post-release documentation fixes DBZ-6351

    Release 2.2.0.CR1 (April 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Quarkus was upgraded to version 3. As Quarkus is now based on Jakarta EE 10, the package names have changed from javax.* to jakarta.*. If you use the Debezium outbox extension or have a dependency on Debezium in your project, you may need to update your dependency management or source code (DBZ-6129).
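
    For code that uses the outbox extension together with JPA, the change typically amounts to swapping the EE package prefix in imports and dependencies. A before/after sketch with a hypothetical entity:

```java
// Before (Quarkus 2.x / Jakarta EE 8 package names):
//   import javax.persistence.Entity;
//   import javax.persistence.Id;

// After (Quarkus 3.x / Jakarta EE 10 package names):
import jakarta.persistence.Entity;
import jakarta.persistence.Id;

@Entity
public class OrderCreatedEvent {   // hypothetical entity used alongside the outbox extension
    @Id
    public Long id;
    public String payload;
}
```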

    New features

    • Capture events in order across MongoDB shards DBZ-5590

    • Pass-through configurations for Kafka topics/configuration DBZ-6262

    • Enable the docker tag to be configurable in the Spanner connector DBZ-6302

    • Support async producer for Pulsar sink to improve performance DBZ-6319

    Fixes

    • Failed retriable operations are retried infinitely DBZ-4488

    • DDL events not stored in schema history topic for excluded tables DBZ-6070

    • Oracle path used current batchSize to calculate end scn is wrong, need to use min batch size DBZ-6155

    • Multiplatform build of example-postgres fails DBZ-6258

    • Add protoc version property to postgres connector pom.xml DBZ-6261

    • Postgres connector doesn’t need logical WAL level when snapshotting only DBZ-6265

    • MySQL connector doesn’t need to query binlog when snapshotting only DBZ-6271

    • Table names with spaces are not correctly deserialized when using an Infinispan cache as the transaction buffer DBZ-6273

    • Transaction buffer state can become corrupted when using Infinispan cache with LOBs DBZ-6275

    • DDL statement couldn’t be parsed - Oracle connector 2.1.3.Final DBZ-6314

    • Unparsable DDL statements (MySQL/MariaDB) DBZ-6316

    • Cassandra 3 cannot be built using JDK20 DBZ-6320

    Other changes

    • Upgrade dependencies (Quarkus, etc) of Debezium UI DBZ-4109

    • UI- Add the UI to configure the additional properties for a connector DBZ-5365

    • Upgrade UI build to use Debezium 2.2 or latest DBZ-6173

    • Oracle-Connector dbz##user needs more rights DBZ-6198

    • Make quay.io primary image repository DBZ-6216

    • Update config properties in RHEL deployment instructions DBZ-6266

    • Fix errors in downstream Getting Started guide DBZ-6268

    • Address review feedback in downstream RHEL and OCP installation guides DBZ-6272

    • Infinispan cache configuration used by Oracle tests are not compatible with Infinispan 14.0.2 DBZ-6274

    • Remove unused/migrated jobs from upstream repository DBZ-6299

    • Upgrade MySQL JDBC driver to 8.0.32 DBZ-6304

    • Allow specifying docker image reference in MongoDB testcontainers implementation DBZ-6305

    • Use MongoDbContainer instead of MongoDBContainer test containers class in ConnectorConfiguration class DBZ-6306

    • Add documentation for JDBC sink connector DBZ-6310

    • Fix all compliance warnings for Jenkins DBZ-6315

    • Remove outdated information about SYS user accounts with Oracle DBZ-6318

    • Bundle Jolokia with Debezium connect image DBZ-6323

    Release 2.2.0.Beta1 (March 31st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Debezium JDBC Sink Connector DBZ-3647

    • Create an endpoint to update a connector DBZ-5314

    • Refactor snapshotting to use change streams instead of oplog DBZ-5987

    • Update the design for Debezium based connectors Filter step DBZ-6060

    • Connect and stream from sharded clusters through mongos instances DBZ-6170

    • Support Postgres dialect for Spanner Kafka Connector DBZ-6178

    • Support Azure blob storage as Debezium history storage DBZ-6180

    • Support Database role in Connector Config. DBZ-6192

    • Remove duplicated createDdlFilter method from historized connector config DBZ-6197

    • Create new SMT to copy/move header to record value DBZ-6201

    • Add support for columns of type "bytea[]" - array of bytea (byte array) DBZ-6232

    • Support ImageFromDockerfile with Debezium’s testcontainers suite DBZ-6244

    • Expose EmbeddedEngine configurations DBZ-6248

    • RabbitMQ Sink DBZ-6260

    Fixes

    • NPE when setting schema.history.internal.store.only.captured.tables.ddl=true DBZ-6072

    • Postgres connector stuck when replication slot does not have confirmed_flush_lsn DBZ-6092

    • java.lang.NullPointerException in MySQL connector with max.queue.size.in.bytes DBZ-6104

    • debezium-connector-mysql failed to parse several DDLs of 'CREATE TABLE' DBZ-6124

    • Zerofill property failed for different int types DBZ-6185

    • GRANT DELETE HISTORY couldn’t be parsed in MariaDB DBZ-6186

    • DDL parse failed for KEY partition table DBZ-6188

    • Config options internal.schema.history.internal.ddl.filter not working DBZ-6190

    • Use CHARSET for alterByConvertCharset clause DBZ-6194

    • Data loss upon connector restart DBZ-6204

    • ParsingException: DDL statement couldn’t be parsed DBZ-6217

    • The CHARACTER/CHARACTER(p)/CHARACTER VARYING(p) data types not recognized as JDBC type CHAR DBZ-6221

    • MySQL treats the BOOLEAN synonym differently when processed in snapshot vs streaming phases. DBZ-6225

    • MySQL treats REAL synonym differently when processed in snapshot vs streaming phases. DBZ-6226

    • Spanner Connector - Deadlock in BufferedPublisher when publish gives exception DBZ-6227

    • Publish of sync event fails when message becomes very large. DBZ-6228

    • MySQL treats NCHAR/NVARCHAR differently when processed in snapshot vs streaming phases. DBZ-6231

    • MySQL singleDeleteStatement parser does not support table alias DBZ-6243

    • Testcontainers MongoDbReplicaSetTest failing with MongoDB 4.2 DBZ-6247

    • Wrong error thrown when snapshot.custom_class=custom and no snapshot.custom.class DBZ-6249

    • Missing GEOMETRY keyword which can be used as column name DBZ-6250

    • Postgres connector stuck trying to fallback to restart_lsn when replication slot confirmed_flush_lsn is null. DBZ-6251

    • MariaDB’s UUID column type cannot be parsed when the schema is loaded DBZ-6255

    Other changes

    • Document message.key.columns and tombstone events limitations for default REPLICA IDENTITY DBZ-5490

    • Reflect configuration changes for MongoDB connector in documentation DBZ-6090

    • Create Oracle CI workflow DBZ-6115

    • Provide instructions for upgrading from Debezium 1.x to 2.x DBZ-6128

    • Update connector configuration examples in deployment instructions DBZ-6153

    • Insert missing Nebel annotations for Oracle connector FAQ topic DBZ-6215

    • Add metadata for MongoDB change streams topic DBZ-6223

    • Remove incubation notice from Debezium Server page DBZ-6235

    • Ensure correct build for Oracle CI in case of pull request DBZ-6239

    • Fix broken link to Streams documentation in shared deployment files DBZ-6263

    • Update config example in Installing Debezium on OpenShift DBZ-6267

    Release 2.2.0.Alpha3 (March 8th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha3 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha3 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium was truncating the trailing milli-/microsecond zeroes on time-zoned types regardless of the schema width setting. This is no longer the case, and Debezium now provides the correct number of trailing zeroes (DBZ-6163).

    New features

    • Optionally parallelize initial snapshots DBZ-823

    • Server side database and collection filtering on MongoDB change stream DBZ-5102

    • Create a Datastax connector based on Cassandra connector DBZ-5951

    • Add support for honouring MongoDB read preference in change stream after promotion DBZ-5953

    • Add support for header to all Debezium Server sinks DBZ-6017

    • Add support for surrogate keys for incremental snapshots DBZ-6023

    • Support String type for key in Mongo incremental snapshot DBZ-6116

    • Fix typo in SQL Server doc: change "evemts" to "events" DBZ-6123

    • Support change stream filtering using MongoDB’s aggregation pipeline step DBZ-6131

    • Remove hardcoded list of system database exclusions that are not required for change streaming DBZ-6152

    Fixes

    • When using snapshot.collection.include.list, relational schema isn’t populated correctly DBZ-3594

    • Debezium UI should use fast-jar again with Quarkus 2.x DBZ-4621

    • GCP Spanner connector start failing when there are multiple indexes on a single column DBZ-6101

    • Negative remaining attempts on MongoDB reconnect case DBZ-6113

    • Tables with spaces or non-ASCII characters in their name are not captured by Oracle because they must be quoted. DBZ-6120

    • Offsets are not advanced in a CDB deployment with low frequency of changes to PDB DBZ-6125

    • Oracle TIMESTAMP WITH TIME ZONE is emitted as GMT during snapshot rather than the specified TZ DBZ-6143

    • Debezium UI E2E Frontend build failing randomly with corrupted Node 16 tar file DBZ-6146

    • Debezium UI SQL Server tests randomly fail due to slow agent start-up DBZ-6149

    • RelationalSnapshotChangeEventSource swallows exception generated during snapshot DBZ-6179

    Other changes

    • Remove redundancies between MySqlJdbcContext and MySqlConnection DBZ-4855

    • Refactor connection management for mongodb connector DBZ-6032

    • Conditionalization anomalies in Oracle connector doc DBZ-6073

    • Optimize debezium-testing-system image to build only modules necessary for tests DBZ-6108

    • Migrate system test jobs to gitlab DBZ-6109

    • Remove references to adding configuration settings to a .properties file DBZ-6130

    • Fix Debezium Server Redis random test failures DBZ-6133

    • Allow TestContainers test framework to expose ConnectorConfiguration as JSON DBZ-6136

    • Upgrade impsort-maven-plugin from 1.7.0 to 1.8.0 DBZ-6144

    • Upgrade Quarkus dependencies to 2.16.3.Final DBZ-6150

    • Github workflows not working for Cassandra job (step Build Debezium Connector Cassandra) DBZ-6171

    • Create SSL scenarios for integration tests for MySQL connector DBZ-6184

    Release 2.2.0.Alpha2 (February 16th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium used to map non-ASCII characters to underscores in topic and schema names. This could lead to conflicts when names differed only in non-ASCII characters. Debezium now provides a strategy to map the characters uniquely. As a by-product, it is no longer possible to use the sanitize.field.names config option (DBZ-5743).

    Debezium Server was extracted from the main repository and is now located in and built from its own separate repository. This allowed the build process to include the non-core connectors in the assembly package (DBZ-6049).

    The SSN field from the Oracle connector was propagated as INT32 in the source info block. This could lead to overflows on certain installations, so the field is now propagated as INT64 (DBZ-6091).
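
    Custom consumers that read the Oracle source info block should therefore treat the field as a 64-bit value. A minimal sketch against the Kafka Connect Struct API; the envelope layout follows the usual Debezium change-event format, and the null handling shown here is an assumption:

```java
import org.apache.kafka.connect.data.Struct;

public final class OracleSourceInfoReader {

    /** Returns the SSN from a change event's source block, which is an INT64 as of 2.2.0.Alpha2. */
    public static long ssnOf(Struct changeEventValue) {
        Struct source = changeEventValue.getStruct("source");
        Long ssn = source.getInt64("ssn");   // previously read with getInt32(...)
        return ssn == null ? -1L : ssn;      // assumption: -1 as a "not present" marker
    }
}
```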

    New features

    • Better control on debezium GTID usage DBZ-2296

    • Adding new option for "ExtractNewRecordState" SMT to exclude unchanged fields DBZ-5283

    • Reactive implementation of Outbox module DBZ-5758

    • Debezium MongoDB connector wizard Filter definition page needs work DBZ-5899

    • Debezium Storage add support for Apache RocketMQ DBZ-5997

    • debezium-server Pulsar support non-default tenant and namespace DBZ-6033

    • Add wallTime in mongodb source info DBZ-6038

    • Vitess: Support Mapping unsigned bigint mysql column type to long DBZ-6043

    • Increase query.fetch.size default to something sensible above zero DBZ-6079

    • Expose sequence field in CloudEvents message id DBZ-6089

    • Reduce verbosity of skipped transactions if transaction has no events relevant to captured tables DBZ-6094

    • Upgrade Kafka client to 3.4.0 DBZ-6102

    Fixes

    • Not all connectors are available in debezium server DBZ-4038

    • Property event.processing.failure.handling.mode is not present in MySQL documentation DBZ-4829

    • Data type conversion failed for mysql bigint DBZ-5798

    • ActivateTracingSpan wrong timestamps reported DBZ-5827

    • Unable to specify column or table include list if name contains a backslash \ DBZ-5917

    • debezium-connector-cassandra 2.1.0.Alpha2 plugin can no longer run "out of the box" DBZ-5925

    • MongoDB Incremental Snapshot not Working DBZ-5973

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    • Incremental snapshot sends the events from signalling DB to Kafka DBZ-6051

    • Mask password in log statement DBZ-6064

    • Loading Custom offset storage fails with Class not found error DBZ-6075

    • SQL Server tasks fail if the number of databases is smaller than maxTasks DBZ-6084

    • When using LOB support, an UPDATE against multiple rows can lead to inconsistent event data DBZ-6107

    Other changes

    • System test-suite ability to prepare OCP environment DBZ-3832

    • TransactionMetadataIT is unstable for Db2 DBZ-5149

    • Update Java Outreach job to use Java 20 DBZ-5825

    • Upgrade to Quarkus 2.16.0.Final DBZ-6005

    • Prepare MongoDB ExtractNewDocumentState SMT doc for downstream GA DBZ-6006

    • SQL Server IncrementalSnapshotWithRecompileIT fails randomly DBZ-6035

    • Remove the redundant "schema.history.internal" from MySqlConnectorConfig DBZ-6040

    • Broken links on FAQ DBZ-6042

    • Upgrade Kafka to 3.3.2 DBZ-6054

    • Upgrade netty version in Pravega to 4.1.86.Final DBZ-6057

    • Return back the driver class option for MySQL connector DBZ-6059

    • Invalid links breaking downstream documentation build DBZ-6069

    • Request SA for UMB DBZ-6077

    • Create certificates for Jenkins for UMB DBZ-6078

    • Request access to cpass UMB topic DBZ-6080

    • Broken debezium-server source file link on docs page DBZ-6111

    Release 2.2.0.Alpha1 (January 19th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    ZonedTimestamp strings were previously sent with fractional-second trailing zeroes removed. The current behaviour is to provide the trailing zeroes, padded to the length/scale of the source column (DBZ-5996).
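
    Consumers that parse ZonedTimestamp values with java.time are unaffected either way, because ISO-8601 parsing accepts any number of fractional-second digits; the sample values below are illustrative only:

```java
import java.time.OffsetDateTime;

public class ZonedTimestampPadding {
    public static void main(String[] args) {
        // Old style (trailing zeroes trimmed) and new style (padded to the column's scale)
        // represent the same instant and parse to equal values.
        OffsetDateTime trimmed = OffsetDateTime.parse("2023-01-19T10:15:30.5Z");
        OffsetDateTime padded  = OffsetDateTime.parse("2023-01-19T10:15:30.500000Z");
        System.out.println(trimmed.equals(padded));   // true
    }
}
```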

    New features

    • Remove redundant modifiers of members for interface fields DBZ-2439

    • Allow reading from read-only Oracle standby disaster/recovery DBZ-3866

    • Remove option for specifying driver class from MySQL Connector DBZ-4663

    • Support S3 bucket as Debezium history store DBZ-5402

    • Update the DBZ-UI documentation page to incorporate the recently added "Custom properties" step details DBZ-5878

    • Support retrying database connection failures during connector start DBZ-5879

    • Add support for Connect Headers to Debezium Server DBZ-5926

    • Sink adapter for Apache RocketMQ DBZ-5962

    • Sink adapter for Infinispan DBZ-5986

    • Add custom Debezium banner to Debezium Server DBZ-6004

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014

    Fixes

    • Debezium is not working with apicurio and custom truststores DBZ-5282

    • Show/Hide password does not work on Connectors View details screen DBZ-5322

    • Snapshotter#snapshotCompleted is invoked regardless of snapshot result DBZ-5852

    • Oracle cannot undo change DBZ-5907

    • Postgresql Data Loss on restarts DBZ-5915

    • Oracle Multithreading lost data DBZ-5945

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • Stream tag images are not published DBZ-5979

    • Table size log message for snapshot.select.statement.overrides tables not correct DBZ-5985

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Log statement for unparseable DDL statement in MySqlDatabaseSchema contains placeholder DBZ-5993

    • Synchronize all actions with core CI & fix GitHub Actions set-output command DBZ-5998

    • Postgresql connector parses the null of the money type into 0 DBZ-6001

    • Run PostgresConnectorIT.shouldReceiveChangesForChangeColumnDefault() failed DBZ-6002

    Other changes

    • Plug-in version information duplicated DBZ-4669

    • Move common code in Cassandra connector core module DBZ-5950

    • website-builder image cannot be built DBZ-5971

    • Zookeeper 3.6.3 available only on archive DBZ-5972

    • Jenkins pipelines don’t provide information about FAILURE status DBZ-5974

    • Remove incubating documentation text for MongoDB ExtractNewDocumentState SMT DBZ-5975

    • Use replace rather than replaceAll DBZ-5976

    • Upgrade Apicurio to 2.4.1.Final DBZ-5977

    • Upgrade JDBC driver to 42.5.1 DBZ-5980

    • Update TestContainers to 1.17.6 DBZ-5990

    • Align pipeline tests with new connector pipelines DBZ-5999

    • Db2 incremental snapshot test execution is blocked DBZ-6008

    \ No newline at end of file + Release Notes for Debezium 2.2

    Release Notes for Debezium 2.2

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.2.1.Final (May 12nd 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    Fixes

    • Toasted varying character array and date array are not correcly processed DBZ-6122

    • Using pg_replication_slot_advance which is not supported by PostgreSQL10. DBZ-6353

    • 'CREATE TABLE t (c NATIONAL CHAR)' parsing failed DBZ-6357

    • Toasted hstore are not correcly processed DBZ-6379

    • Snapshotting does not work for hstore in Map mode DBZ-6384

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • MySql in debezium-parser-ddl :The inserted sql statement reports an error DBZ-6401

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • New SMT HeaderToValue not working DBZ-6411

    • Debezium Server 2.2.0.Final BOM refers to debezium-build-parent 2.2.0-SNAPSHOT DBZ-6437

    • Oracle Connector failed parsing DDL Statement DBZ-6442

    • Oracle DDL shrink space for index partition can not be parsed DBZ-6446

    • Fix existing bug in information schema query in the Spanner connector DBZ-6385

    • change logging level of skip.messages.without.change DBZ-6391

    • Include redo/archive log metadata on ORA-01291 exceptions DBZ-6436

    Other changes

    • Base the "replaceable" build numbers in legacy deployment instructions on debezium-build-number attribute DBZ-6371

    Release 2.2.0.Final (April 20th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Describe Postgres example configuration for Debezium Server DBZ-6325

    • Improve messages in Redis sink in case of OOM DBZ-6346

    • Stream shard list for debezium vitess connector DBZ-6356

    Fixes

    • If column.include.list/column.exclude.list are used and the target table receives an update for the excluded (or not included) column - such events should be ignored DBZ-2979

    • Connector offsets do not advance on transaction commit with filtered events when LOB enabled DBZ-5395

    • Task failure when index is made on primary columns of table. DBZ-6238

    • Oracle connector doesn’t need to verify redo log when snapshotting only DBZ-6276

    • MySQL connector cannot parse table with SYSTEM VERSIONING DBZ-6331

    • MySql in debezium-parser-ddl does not support with keyword parsing DBZ-6336

    • Duplicate JMX MBean names when multiple vitess tasks running in the same JVM DBZ-6347

    • KafkaSignalThread#SIGNAL_POLL_TIMEOUT_MS option duplicate signal prefix DBZ-6361

    Other changes

    • Complete MongoDB incremental snapshotting implementation DBZ-4427

    • Add documentation for the reactive variant of the Quarkus outbox extension DBZ-5859

    • Create an annotation for flaky tests DBZ-6324

    • 2.1.4 post-release documentation fixes DBZ-6351

    Release 2.2.0.CR1 (April 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Quarkus was upgraded to version 3. As Quarkus is now based on Jakarta EE 10 the package names has been changed from javax. to jakarta.. If you use Debezium outbox extension or you have dependency on Debezium in your project then it is possible that you’ll either need to update your dependency management or the source code (DBZ-6129).

    New features

    • Capture evenents in order across mongodb shards DBZ-5590

    • Pass through configurations for kafka topics/configuration DBZ-6262

    • Enable the docker tag to be configurable in the Spanner connector DBZ-6302

    • Support async producer for Pulsar sink to improve performance DBZ-6319

    Fixes

    • Failed retriable operations are retried infinitely DBZ-4488

    • DDL events not stored in schema history topic for excluded tables DBZ-6070

    • Oracle path used current batchSize to calculate end scn is wrong, need to use min batch size DBZ-6155

    • Multiplatform build of example-postres fails DBZ-6258

    • Add protoc version property to postgres connector pom.xml DBZ-6261

    • Postgres connector doesn’t need logical WAL level when snapshotting only DBZ-6265

    • MySQL connector doesn’t need to query binlog when snapshotting only DBZ-6271

    • Table names with spaces are not correctly deserialized when using an Infinispan cache as the transaction buffer DBZ-6273

    • Transaction buffer state can become corrupted when using Infinispan cache with LOBs DBZ-6275

    • DDL statement couldn’t be parsed - Oracle connector 2.1.3.Final DBZ-6314

    • Unparsable DDL statements (MySQL/MariaDB) DBZ-6316

    • Cassandra 3 cannot be built using JDK20 DBZ-6320

    Other changes

    • Upgrade dependencies (Quarkus, etc) of Debezium UI DBZ-4109

    • UI- Add the UI to configure the additional properties for a connector DBZ-5365

    • Upgrade UI build to use Debezium 2.2 or latest DBZ-6173

    • Oracle-Connector dbz##user needs more rights DBZ-6198

    • Make quay.io primary image repository DBZ-6216

    • Update config properties in RHEL deployment instructions DBZ-6266

    • Fix errors in downstream Getting Started guide DBZ-6268

    • Address review feedback in downstream RHEL and OCP installation guides DBZ-6272

    • Infinispan cache configuration used by Oracle tests are not compatible with Infinispan 14.0.2 DBZ-6274

    • Remove unused/migrated jobs from upstream repository DBZ-6299

    • Upgrade MySQL JDBC driver to 8.0.32 DBZ-6304

    • Allow specifying docker image reference in MongoDB testcontainers implementation DBZ-6305

    • Use MongoDbContainer instead of MongoDBContainer test containers class in ConnectorConfiguration class DBZ-6306

    • Add documentation for JDBC sink connector DBZ-6310

    • Fix all compliance warnings for Jenkins DBZ-6315

    • Remove outdated information about SYS user accounts with Oracle DBZ-6318

    • Bundle Jolokia with Debezium connect image DBZ-6323

    Release 2.2.0.Beta1 (March 31st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Debezium JDBC Sink Connector DBZ-3647

    • Create an endpoint to update a connector DBZ-5314

    • Refactor snapshotting to use change streams instead of oplog DBZ-5987

    • Update the design for Debezium based connectors Filter step DBZ-6060

    • Connect and stream from sharded clusters through mongos instances DBZ-6170

    • Support Postgres dialect for Spanner Kafka Connector DBZ-6178

    • Support Azure blob storage as Debezium history storage DBZ-6180

    • Support Database role in Connector Config. DBZ-6192

    • Remove duplicated createDdlFilter method from historized connector config DBZ-6197

    • Create new SMT to copy/move header to record value DBZ-6201

    • Add support for columns of type "bytea[]" - array of bytea (byte array) DBZ-6232

    • Support ImageFromDockerfile with Debezium’s testcontainers suite DBZ-6244

    • Expose EmbeddedEngine configurations DBZ-6248

    • RabbitMQ Sink DBZ-6260

    Fixes

    • NPE when setting schema.history.internal.store.only.captured.tables.ddl=true DBZ-6072

    • Postgres connector stuck when replication slot does not have confirmed_flush_lsn DBZ-6092

    • java.lang.NullPointerException in MySQL connector with max.queue.size.in.bytes DBZ-6104

    • debezium-connector-mysql failed to parse serveral DDLs of 'CREATE TABLE' DBZ-6124

    • Zerofill property failed for different int types DBZ-6185

    • GRANT DELETE HISTORY couldn’t be parsed in mariadb DBZ-6186

    • ddl parse failed for key partition table DBZ-6188

    • Config options internal.schema.history.internal.ddl.filter not working DBZ-6190

    • Use CHARSET for alterByConvertCharset clause DBZ-6194

    • Data loss upon connector restart DBZ-6204

    • ParsingException: DDL statement couldn’t be parsed DBZ-6217

    • The CHARACTER/CHARACTER(p)/CHARACTER VARYING(p) data types not recognized as JDBC type CHAR DBZ-6221

    • MySQL treats the BOOLEAN synonym differently when processed in snapshot vs streaming phases. DBZ-6225

    • MySQL treats REAL synonym differently when processed in snapshot vs streaming phases. DBZ-6226

    • Spanner Connector - Deadlock in BufferedPublisher when publish gives exception DBZ-6227

    • Publish of sync event fails when message becomes very large. DBZ-6228

    • MySQL treats NCHAR/NVARCHAR differently when processed in snapshot vs streaming phases. DBZ-6231

    • MySQL singleDeleteStatement parser does not support table alias DBZ-6243

    • Testcontainers MongoDbReplicaSetTest failing with MongoDB 4.2 DBZ-6247

    • Wrong error thrown when snapshot.custom_class=custom and no snapshot.custom.class DBZ-6249

    • Missing GEOMETRY keyword which can be used as column name DBZ-6250

    • Postgres connector stuck trying to fallback to restart_lsn when replication slot confirmed_flush_lsn is null. DBZ-6251

    • MariaDB’s UUID column type cannot be parsed when scheme is loaded DBZ-6255

    Other changes

    • Document message.key.columns and tombstone events limitations for default REPLICA IDENTITY DBZ-5490

    • Reflect configuration changes for MongoDB connector in documentation DBZ-6090

    • Create Oracle CI workflow DBZ-6115

    • Provide instructions for upgrading from Debezium 1.x to 2.x DBZ-6128

    • Update connector configuration examples in deployment instructions DBZ-6153

    • Insert missing Nebel annotations for Oracle connector FAQ topic DBZ-6215

    • Add metadata for MongoDB change streams topic DBZ-6223

    • Remove incubation notice from Debezium Server page DBZ-6235

    • Ensure correct build for Oracle CI in case of pull request DBZ-6239

    • Fix broken link to Streams documentation in shared deployment files DBZ-6263

    • Update config example in Installing Debezium on OpenShift DBZ-6267

    Release 2.2.0.Alpha3 (March 8th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha3 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha3 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha3 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium was truncating on timezoned types milli/microsecond zeroes regardless of the schema width setting. This is no longer the case and Debezium provides the correct number of trailing zeroes (DBZ-6163).

    New features

    • Optionally parallelize initial snapshots DBZ-823

    • Server side database and collection filtering on MongoDB change stream DBZ-5102

    • Create a Datastax connector based on Cassandra connector DBZ-5951

    • Add support for honouring MongoDB read preference in change stream after promotion DBZ-5953

    • Add support for header to all Debezium Server sinks DBZ-6017

    • Add support for surrogate keys for incremental snapshots DBZ-6023

    • Support String type for key in Mongo incremental snapshot DBZ-6116

    • fix typo in sqlserver doc. change "evemts" to "events". DBZ-6123

    • Support change stream filtering using MongoDB’s aggregation pipeline step DBZ-6131

    • Remove hardcoded list of system database exclusions that are not required for change streaming DBZ-6152

    Fixes

    • When using snapshot.collection.include.list, relational schema isn’t populated correctly DBZ-3594

    • Debezium UI should use fast-jar again with Quarkus 2.x DBZ-4621

    • GCP Spanner connector start failing when there are multiple indexes on a single column DBZ-6101

    • Negative remaining attempts on MongoDB reconnect case DBZ-6113

    • Tables with spaces or non-ASCII characters in their name are not captured by Oracle because they must be quoted. DBZ-6120

    • Offsets are not advanced in a CDB deployment with low frequency of changes to PDB DBZ-6125

    • Oracle TIMESTAMP WITH TIME ZONE is emitted as GMT during snapshot rather than the specified TZ DBZ-6143

    • Debezium UI E2E Frontend build failing randomly with corrupted Node 16 tar file DBZ-6146

    • Debezium UI SQL Server tests randomly fail due to slow agent start-up DBZ-6149

    • RelationalSnapshotChangeEventSource swallows exception generated during snapshot DBZ-6179

    Other changes

    • Remove redundancies between MySqlJdbcContext and MySqlConnection DBZ-4855

    • Refactor connection management for mongodb connector DBZ-6032

    • Conditionalization anomalies in Oracle connector doc DBZ-6073

    • Optimize debezium-testing-system image to build only modules necessary for tests DBZ-6108

    • Migrate system test jobs to gitlab DBZ-6109

    • Remove references to adding configuration settings to a .properties file DBZ-6130

    • Fix Debezium Server Redis random test failures DBZ-6133

    • Allow TestContainers test framework to expose ConnectorConfiguration as JSON DBZ-6136

    • Upgrade impsort-maven-plugin from 1.7.0 to 1.8.0 DBZ-6144

    • Upgrade Quarkus dependencies to 2.16.3.Final DBZ-6150

    • Github workflows not working for Cassandra job (step Build Debezium Connector Cassandra) DBZ-6171

    • Create SSL scenarios for integration tests for MySQL connector DBZ-6184

    Release 2.2.0.Alpha2 (February 16th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Debezium mapped non-ASCII characters into underscores in topic and schema names. This could lead into conflicts in case of names differing with only non-ASCII characters. Debezium now provides a strategy to map the characters uniquely. As by-product it is no longer possible to use sanitize.field.names config option (DBZ-5743).

    Debezium Server was extracted from the main repository and is now located and built from its won separate repository. This allowed the build process to include the non-core connectors in the assembly package (DBZ-6049).

    SSN field from Oracle connector was propagated as INT32 in the source info block. This could lead to overflows on certain installations so the field is now propagated as INT64 (DBZ-6091).

    New features

    • Better control on debezium GTID usage DBZ-2296

    • Adding new option for "ExtractNewRecordState" SMT to exclude unchanged fields DBZ-5283

    • Reactive implementation of Outbox module DBZ-5758

    • Debezium MongoDB connector wizard Filter definition page needs work DBZ-5899

    • Debezium Storage add support for Apache RocketMQ DBZ-5997

    • debezium-server Pulsar support non-default tenant and namespace DBZ-6033

    • Add wallTime in mongodb source info DBZ-6038

    • Vitess: Support Mapping unsigned bigint mysql column type to long DBZ-6043

    • Increase query.fetch.size default to something sensible above zero DBZ-6079

    • Expose sequence field in CloudEvents message id DBZ-6089

    • Reduce verbosity of skipped transactions if transaction has no events relevant to captured tables DBZ-6094

    • Upgrade Kafka client to 3.4.0 DBZ-6102

    Fixes

    • Not all connectors are available in debezium server DBZ-4038

    • Property event.processing.failure.handling.mode is not present in MySQL documentation DBZ-4829

    • Data type conversion failed for mysql bigint DBZ-5798

    • ActivateTracingSpan wrong timestamps reported DBZ-5827

    • Unable to specify column or table include list if name contains a backslash \ DBZ-5917

    • debezium-connector-cassandra 2.1.0.Alpha2 plugin can no longer run "out of the box" DBZ-5925

    • MongoDB Incremental Snapshot not Working DBZ-5973

    • Nullable columns marked with "optional: false" in DDL events DBZ-6003

    • Vitess: Handle the shard list difference between current db shards and persisted shards DBZ-6011

    • DDL statement with TokuDB engine specific "CLUSTERING KEY" couldn’t be parsed DBZ-6016

    • DDL parse fail for role revoke with "user-like" role name DBZ-6019

    • DDL parse fail for ALTER USER x DEFAULT ROLE y; DBZ-6020

    • Offsets are not flushed on connect offsets topic when encountering an error on Postgres connector DBZ-6026

    • Unexpected format for TIME column: 8:00 DBZ-6029

    • Oracle does not support compression/logging clauses after an LOB storage clause DBZ-6031

    • Debezium is logging the full message along with the error DBZ-6037

    • Improve resilience during internal schema history recovery from Kafka DBZ-6039

    • Incremental snapshot sends the events from signalling DB to Kafka DBZ-6051

    • Mask password in log statement DBZ-6064

    • Loading Custom offset storage fails with Class not found error DBZ-6075

    • SQL Server tasks fail if the number of databases is smaller than maxTasks DBZ-6084

    • When using LOB support, an UPDATE against multiple rows can lead to inconsistent event data DBZ-6107

    Other changes

    • System test-suite ability to prepare OCP environment DBZ-3832

    • TransactionMetadataIT is unstable for Db2 DBZ-5149

    • Update Java Outreach job to use Java 20 DBZ-5825

    • Upgrade to Quarkus 2.16.0.Final DBZ-6005

    • Prepare MongoDB ExtractNewDocumentState SMT doc for downstream GA DBZ-6006

    • SQL Server IncrementalSnapshotWithRecompileIT fails randomly DBZ-6035

    • Remove the redundant "schema.history.internal" from MySqlConnectorConfig DBZ-6040

    • Broken links on FAQ DBZ-6042

    • Upgrade Kafka to 3.3.2 DBZ-6054

    • Upgrade netty version in Pravega to 4.1.86.Final DBZ-6057

    • Return back the driver class option for MySQL connector DBZ-6059

    • Invalid links breaking downstream documentation build DBZ-6069

    • Request SA for UMB DBZ-6077

    • Create certificates for Jenkins for UMB DBZ-6078

    • Request access to cpass UMB topic DBZ-6080

    • Broken debezium-server source file link on docs page DBZ-6111

    Release 2.2.0.Alpha1 (January 19th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.3.1 and has been tested with version 3.3.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.2.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.2.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.2.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    ZonedTimestamp strings were sent with their fractional-second trailing zeroes removed. The current behaviour is to pad the fraction with trailing zeroes to the length/scale of the source column (DBZ-5996).
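
    As a rough illustration of the format change (the column scale and the sample value below are made up for this example; this is not Debezium code):

    import java.time.OffsetDateTime;
    import java.time.format.DateTimeFormatter;

    public class ZonedTimestampPaddingSketch {
        public static void main(String[] args) {
            OffsetDateTime value = OffsetDateTime.parse("2023-01-19T10:15:30.100Z");
            // Old behaviour: trailing fractional zeroes were trimmed.
            DateTimeFormatter trimmed = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SX");
            // New behaviour: the fraction is padded to the source column's scale,
            // e.g. scale 6 for a TIMESTAMP(6) column.
            DateTimeFormatter padded = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSX");
            System.out.println(trimmed.format(value)); // 2023-01-19T10:15:30.1Z
            System.out.println(padded.format(value));  // 2023-01-19T10:15:30.100000Z
        }
    }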

    New features

    • Remove redundant modifiers of members for interface fields DBZ-2439

    • Allow reading from read-only Oracle standby disaster/recovery DBZ-3866

    • Remove option for specifying driver class from MySQL Connector DBZ-4663

    • Support S3 bucket as Debezium history store DBZ-5402

    • Update the DBZ-UI documentation page to incorporate the recently added "Custom properties" step details DBZ-5878

    • Support retrying database connection failures during connector start DBZ-5879

    • Add support for Connect Headers to Debezium Server DBZ-5926

    • Sink adapter for Apache RocketMQ DBZ-5962

    • Sink adapter for Infinispan DBZ-5986

    • Add custom Debezium banner to Debezium Server DBZ-6004

    • Postgres LSN check should honor event.processing.failure.handling.mode DBZ-6012

    • Enhance the Spanner connector by adding features and/or solving bugs DBZ-6014

    Fixes

    • Debezium is not working with apicurio and custom truststores DBZ-5282

    • Show/Hide password does not work on Connectors View details screen DBZ-5322

    • Snapshotter#snapshotCompleted is invoked regardless of snapshot result DBZ-5852

    • Oracle cannot undo change DBZ-5907

    • Postgresql Data Loss on restarts DBZ-5915

    • Oracle Multithreading lost data DBZ-5945

    • Spanner connector is missing JSR-310 dependency DBZ-5959

    • Truncate records incompatible with ExtractNewRecordState DBZ-5966

    • Computed partition must not be negative DBZ-5967

    • Stream tag images are not published DBZ-5979

    • Table size log message for snapshot.select.statement.overrides tables not correct DBZ-5985

    • NPE in execute snapshot signal with exclude.tables config on giving wrong table name DBZ-5988

    • There is a problem with postgresql connector parsing the boundary value of money type DBZ-5991

    • Log statement for unparseable DDL statement in MySqlDatabaseSchema contains placeholder DBZ-5993

    • Synchronize all actions with core CI & fix GitHub Actions set-output command DBZ-5998

    • Postgresql connector parses the null of the money type into 0 DBZ-6001

    • Run PostgresConnectorIT.shouldReceiveChangesForChangeColumnDefault() failed DBZ-6002

    Other changes

    • Plug-in version information duplicated DBZ-4669

    • Move common code in Cassandra connector core module DBZ-5950

    • website-builder image cannot be built DBZ-5971

    • Zookeeper 3.6.3 available only on archive DBZ-5972

    • Jenkins pipelines don’t provide information about FAILURE status DBZ-5974

    • Remove incubating documentation text for MongoDB ExtractNewDocumentState SMT DBZ-5975

    • Use replace rather than replaceAll DBZ-5976

    • Upgrade Apicurio to 2.4.1.Final DBZ-5977

    • Upgrade JDBC driver to 42.5.1 DBZ-5980

    • Update TestContainers to 1.17.6 DBZ-5990

    • Align pipeline tests with new connector pipelines DBZ-5999

    • Db2 incremental snapshot test execution is blocked DBZ-6008

    \ No newline at end of file
    diff --git a/releases/2.3/index.html b/releases/2.3/index.html
    index e40b13b0a9..52522d72f3 100644
    --- a/releases/2.3/index.html
    +++ b/releases/2.3/index.html
    @@ -1 +1 @@
    - Debezium Release Series 2.3

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.32
    MongoDB Database: 4.2, 4.4, 5.0, 6.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.5.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0, 21.3.0.0, 21.4.0.0, 21.5.0.0, 21.6.0.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.3.1.Final

    2023-07-27
    Fixed error handling logic; MySQL and Oracle grammar improvements; Cassandra no longer skips batches with DELETE; MongoDB SRV working on Debezium Server; Configurable option to limit the mining distance from CURRENT_SCN

    2.3.0.Final

    2023-06-20

    2.3.0.CR1

    2023-06-09

    2.3.0.Beta1

    2023-05-26
    Incremental snapshot notifications correlate with the signal id; PostgreSQL connector can configure replica identity for tables; Spanner Connector delivers provider header; Improved error handling of Google PubSub sink; Compatibility with Oracle 23 driver
    \ No newline at end of file
    + Debezium Release Series 2.3

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.32
    MongoDB Database: 4.2, 4.4, 5.0, 6.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.5.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.0.1, 19.8.0.0, 21.1.0.0, 21.3.0.0, 21.4.0.0, 21.5.0.0, 21.6.0.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.3.1.Final

    2023-07-27
    Fixed error handling logic; MySQL and Oracle grammar improvements; Cassandra no longer skips batches with DELETE; MongoDB SRV working on Debezium Server; Configurable option to limit the mining distance from CURRENT_SCN

    2.3.0.Final

    2023-06-20

    2.3.0.CR1

    2023-06-09

    2.3.0.Beta1

    2023-05-26
    Incremental snapshot notifications correlate with the signal id; PostgreSQL connector can configure replica identity for tables; Spanner Connector delivers provider header; Improved error handling of Google PubSub sink; Compatibility with Oracle 23 driver
    \ No newline at end of file
    diff --git a/releases/2.3/release-notes.html b/releases/2.3/release-notes.html
    index ff7df201a6..a39b139edb 100644
    --- a/releases/2.3/release-notes.html
    +++ b/releases/2.3/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 2.3

    Release Notes for Debezium 2.3

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.3.7.Final (January 21st 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.7.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.7.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.7.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    Other changes

    There are no other changes in this release.

    Release 2.3.6.Final (January 12th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.6.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.6.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.6.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Log sequence check should treat each redo thread independently DBZ-7158

    Other changes

    There are no other changes in this release.

    Release 2.3.5.Final (December 15th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Remove spaces from Signal and Notification MBean’s ObjectName DBZ-6957

    • ParsingException (MySQL/MariaDB): User specification with whitespace DBZ-6978

    • RecordsStreamProducerIT#shouldReceiveChangesForInfinityNumericWithInfinity fails on Postgres < 14 DBZ-6986

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • [DBZ-6517] [HF] CloudEventsConverter throws static error on Kafka Connect 3.5+ DBZ-7263

    Other changes

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    Release 2.3.4.Final (September 21st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Configurable order of user defined and internal aggregation pipeline DBZ-6872

    Fixes

    • MongoDB New Document State Extraction: original name overriding does not work DBZ-6773

    • errors.max.retries = 0 Causes retrievable error to be ignored DBZ-6866

    • Oracle alter table drop constraint fails when cascading index DBZ-6876

    • ExtractNewRecordState’s schema cache is not updated with arrival of the ddl change event DBZ-6901

    • Clean log printout in Redis Debezium Sink DBZ-6908

    • MySql connector get NPE when snapshot.mode is set to never and signal data collection configured DBZ-6937

    • Sanity check / retry for redo logs does not work per Oracle RAC thread DBZ-6938

    Other changes

    • Increase Oracle log level to DEBUG for several key important log messages DBZ-6880

    • Document cursor pipeline ordering and oversize document handling mode DBZ-6883

    Release 2.3.3.Final (September 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Oracle SCN metrics were exposed as strings. This could prevent certain tools, such as the JMX exporter, from scraping them. Debezium now exposes them as numeric BigInteger values (DBZ-6798).
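
    A hedged sketch of what this means for a JMX-based scraper; the MBean ObjectName pattern and the CurrentScn attribute name below are assumptions based on the Debezium Oracle connector metrics documentation, and the snippet only works when run inside (or attached to) the JVM that hosts the connector:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ScnMetricSketch {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Assumed name pattern; replace "myserver" with your topic.prefix value.
            ObjectName name = new ObjectName(
                    "debezium.oracle:type=connector-metrics,context=streaming,server=myserver");
            // Scrapers that previously read this attribute as a String should now
            // expect a numeric (BigInteger) value.
            Object currentScn = server.getAttribute(name, "CurrentScn");
            System.out.println(currentScn + " (" + currentScn.getClass().getName() + ")");
        }
    }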

    New features

    • Debezium 2.3.0.Final Missing Kafka Channel Documentation DBZ-6688

    • Make partial and multi-response transactions debug level logs DBZ-6830

    Fixes

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • ExtractNewDocumentState for MongoDB ignore previous document state when handling delete event’s with REWRITE DBZ-6725

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Dbz crashes on DDL statement (non Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    Other changes

    • Missing or misspelled IDs result in downstream build errors DBZ-6754

    Release 2.3.2.Final (August 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Oracle connector used 2000 as the default value for the LogMiner query fetch size. Based on community feedback, the default was changed to 10000, as it provides significantly better performance without any downsides (DBZ-6729).
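
    A minimal sketch of pinning the fetch size explicitly, assuming the option in question is the query.fetch.size property mentioned elsewhere in these notes (e.g. DBZ-6079):

    import java.util.Properties;

    public class OracleFetchSizeSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.oracle.OracleConnector");
            // The default LogMiner query fetch size is now 10000; set the option
            // explicitly if you want to keep the previous value of 2000.
            props.setProperty("query.fetch.size", "2000");
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }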

    New features

    There are no new features in this release.

    Fixes

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Oracle fails to process a DROP USER DBZ-6716

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • SQL Server fail to start due to duplicate definition of query.fetch.size DBZ-6743

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    Other changes

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Upstream documentation missing types for configurations DBZ-6707

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    Release 2.3.1.Final (July 27th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Refactor errors.max.retries to common connector framework DBZ-6573

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Introduce internal config option to control how close to CURRENT_SCN Oracle may mine DBZ-6660

    Fixes

    • Batches with DELETE statement first will skip everything else DBZ-6576

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Cannot delete non-null interval value DBZ-6648

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    Other changes

    • Refactor retry handling in Redis schema history DBZ-6594

    • NotificationIT with Oracle xstream fails randomly DBZ-6672

    • Flaky Oracle test: shouldCaptureChangesForTransactionsAcrossSnapshotBoundaryWithoutReemittingDDLChanges DBZ-6673

    Release 2.3.0.Final (June 20th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for custom SourceInfoStructMaker for adding new fields to source field DBZ-6076

    • Connector can potentially read a lot of sync topic messages on startup DBZ-6308

    • Allow to specify separate SID for rac.nodes settings DBZ-6359

    • Periodically clean up SGA using new LogMiner connection DBZ-6499

    • Upgrade debezium-connector-mysql tests to use MySQL 8 DBZ-6534

    • Remove duplicate partitions in TaskSyncContext. DBZ-6544

    • Support exactly-once semantic for streaming phase from Postgres connector DBZ-6547

    • Monitoring failed Incremental Snapshots DBZ-6552

    Fixes

    • Upgrade to Infinispan 14.0.11.Final to fix CVE-2022-45047 DBZ-6193

    • Date and Time values without timezones are not persisted correctly based on database.time_zone DBZ-6399

    • "Ignoring invalid task provided offset" DBZ-6463

    • Oracle snapshot.include.collection.list should be prefixed with databaseName in documentation. DBZ-6474

    • Allow schema to be specified in the Debezium Sink Connector configuration DBZ-6491

    • Error value of negative seconds in convertOracleIntervalDaySecond DBZ-6513

    • Parse mysql table name failed which ending with backslash DBZ-6519

    • Oracle Connector: Snapshot fails with specific combination DBZ-6528

    • Table order is incorrect on snapshots DBZ-6533

    • Unhandled NullPointerException in PartitionRouting will crash the whole connect plugin DBZ-6543

    • Incorrect image name in postgres example of the operator repo DBZ-6548

    • Examples are not updated with correct image tags for released DBZ-6549

    • SQL grammar exception on MySQL ALTER statements with multiple columns DBZ-6554

    • debezium/connect image for 2.2.1.Final is not available on dockerhub or quay.io DBZ-6558

    • Bug in field.name.adjustment.mode Property DBZ-6559

    • Operator sets incorrect value of transformation.predicate when no predicate is specified DBZ-6560

    • Kubernetes-Config extension interferes with SSL tests due to k8 devservice starting up DBZ-6574

    • MySQL read-only connector with Kafka signals enabled fails on start up DBZ-6579

    • Redis schema history can fail upon startup DBZ-6580

    Other changes

    • Use "debezium/kafka" container for Debezium UI tests instead of "confluentinc/cp-kafka" DBZ-6449

    • Include debezium operator in image build pipeline DBZ-6546

    • Update repository list in contributor list and missing commit workflows DBZ-6556

    • Upgrade MySQL JDBC driver to 8.0.33 DBZ-6563

    • Upgrade Google Cloud BOM to 26.17.0 DBZ-6570

    Release 2.3.0.CR1 (June 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Code Improvements for skip.messages.without.change DBZ-6366

    • Allow sending signals and receiving notifications via JMX DBZ-6424

    • MySql in debezium-parser-ddl does not support TABLE statement parsing DBZ-6435

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • Only use error processing mode on certain errors DBZ-6523

    • Use better hashing function for PartitionRouting DBZ-6529

    • Create PoC of Debezium Server Operator DBZ-6493

    Fixes

    • Create OCP cluster provisioning jobs DBZ-3129

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement DBZ-6507

    • Oracle Connector failed parsing DDL Statement DBZ-6508

    • FileSignalChannel is not loaded DBZ-6509

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • MySQL "national" keyword is not accepted as column name DBZ-6537

    Other changes

    • Test Debezium on RED HAT OPENSHIFT DATABASE ACCESS - MongoDB Atlas DBZ-5231

    • Add docs on how to extend channels and notification DBZ-6408

    • Create Cron trigger for system tests DBZ-6423

    • Debezium UI Repo dependency update DBZ-6473

    • Add Debezium Server nightly images DBZ-6536

    • Include debezium operator in release scripts DBZ-6539

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    Release 2.3.0.Beta1 (May 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The JDBC storage used UTF-16 encoding by default. Most databases default to UTF-8, so the JDBC storage was aligned with them (DBZ-6476).

    New features

    • Testsuite should deploy PostgreSQL with Primary-Secondary streaming replication DBZ-3202

    • PostgreSQL: Set Replica Identity when the connector starts DBZ-6112

    • Correlate incremental snapshot notifications ids with execute signal DBZ-6447

    • [MariaDB] Add support for userstat plugin keywords DBZ-6459

    • Add a header provider string DBZ-6489

    Fixes

    • Debezium Server stops sending events to Google Cloud Pub/Sub DBZ-5175

    • Snapshot step 5 - Reading structure of captured tables time too long DBZ-6439

    • Oracle parallel snapshots do not properly set PDB context when using multitenancy DBZ-6457

    • Debezium Server cannot recover from Google Pub/Sub errors DBZ-6461

    • DDL statement couldn’t be parsed: AUTHENTICATION_POLICY_ADMIN DBZ-6479

    • Db2 connector can fail with NPE on notification sending DBZ-6485

    • BigDecimal fails when queue memory size limit is in place DBZ-6490

    • ORACLE table can not be captured, got runtime.NoViableAltException DBZ-6492

    • Signal poll interval has incorrect default value DBZ-6496

    • Oracle JDBC driver 23.x throws ORA-18716 - not in any time zone DBZ-6502

    • Alpine postgres images should use llvm/clang 15 explicitly DBZ-6506

    • ExtractNewRecordState SMT in combination with HeaderToValue SMT results in Unexpected field name exception DBZ-6486

    Other changes

    • Verify MongoDB Connector with AWS DocumentDB DBZ-6419

    • Enable set log level in tests DBZ-6460

    • Check OOME on CI tests DBZ-6462

    • Signaling data collection document should refer to source database DBZ-6470

    Release 2.3.0.Alpha1 (May 11th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The MySQL connector now uses an SSL connection by default when one is available (DBZ-6340).
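
    A minimal sketch of opting out, assuming the database.ssl.mode option controls this behaviour (the option name and its "disabled" value are taken from the MySQL connector documentation, not from this note):

    import java.util.Properties;

    public class MySqlSslModeSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            // The connector now prefers an SSL connection when the server offers one;
            // set the mode to "disabled" explicitly to keep a plain connection.
            props.setProperty("database.ssl.mode", "disabled");
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }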

    New features

    • Enable Debezium to send notifications about its status DBZ-1973

    • Saving Debezium states to JDBC database DBZ-3621

    • Make signalling channel configurable DBZ-4027

    • Edit a connector in Debezium UI DBZ-5313

    • Add connector display name and id to Config endpoint response DBZ-5865

    • Introduce LogMiner query filtering modes DBZ-6254

    • Ensure that the connector can start from a stale timestamp more than one hour into the past DBZ-6307

    • Add JWT authentication to HTTP Client DBZ-6348

    • Monitoring progress of Incremental Snapshots DBZ-6354

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    • Support multiple tasks when streaming shard list DBZ-6365

    • Kinesis Sink - AWS Credentials Provider DBZ-6372

    • Fix existing bug in information schema query in the Spanner connector DBZ-6385

    • change logging level of skip.messages.without.change DBZ-6391

    • Debezium UI should ignore unsupported connectors, including unsupported Debezium connectors DBZ-6426

    • Make DELETE sql configurable in JDBC Storage DBZ-6433

    • Include redo/archive log metadata on ORA-01291 exceptions DBZ-6436

    Fixes

    • Back button is not working on the review page UI DBZ-5841

    • Toasted varying character array and date array are not correctly processed DBZ-6122

    • Incorrect dependencies in Debezium Server for Cassandra connector DBZ-6147

    • Lock contention on LOG_MINING_FLUSH table when multiple connectors deployed DBZ-6256

    • Document Requirements for multiple connectors on same db host DBZ-6321

    • The rs_id field is null in Oracle change event source information block DBZ-6329

    • Using pg_replication_slot_advance which is not supported by PostgreSQL10. DBZ-6353

    • 'CREATE TABLE t (c NATIONAL CHAR)' parsing failed DBZ-6357

    • Toasted hstore are not correctly processed DBZ-6379

    • Snapshotting does not work for hstore in Map mode DBZ-6384

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • __source_ts_ms r (read) operation date is set to future for SQL Server DBZ-6388

    • Connector cards are misaligned on first step DBZ-6392

    • Debezium Server snapshots are not published DBZ-6395

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • MySql in debezium-parser-ddl: The inserted sql statement reports an error DBZ-6401

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • Set (instead of adding) Authorization Headers DBZ-6405

    • New SMT HeaderToValue not working DBZ-6411

    • Debezium Server 2.2.0.Final BOM refers to debezium-build-parent 2.2.0-SNAPSHOT DBZ-6437

    • NPE on read-only MySQL connector start up DBZ-6440

    • Oracle Connector failed parsing DDL Statement DBZ-6442

    • Oracle DDL shrink space for index partition can not be parsed DBZ-6446

    Other changes

    • Verify streaming off of secondary works DBZ-1661

    • Remove the old connector type endpoints from the UI backend DBZ-5604

    • Incremental snapshot completion notifications DBZ-5632

    • Change connector test matrix jobs to pipeline jobs and migrate them to gitlab jenkins DBZ-5861

    • Add Debezium steps when performing a PostgreSQL database upgrade DBZ-6046

    • Test migration from Debezium 1.x to 2.x DBZ-6126

    • Remove OCP 4.8 and 4.9 from 1.x supported configurations page DBZ-6132

    • Remove potentially dangerous JDBC props in MySQL connections DBZ-6157

    • Refactor storage implementations DBZ-6209

    • Align connector field snapshot.mode descriptions as per documentation DBZ-6259

    • Document "incubating" status of incremental snapshot for sharded MongoDB clusters DBZ-6342

    • Run debezium-connector-jdbc build on 'Build Debezium' CI workflow DBZ-6360

    • Migrate Debezium UI MongoDB to MongoDbReplicaSet from core DBZ-6363

    • Base the "replaceable" build numbers in legacy deployment instructions on debezium-build-number attribute DBZ-6371

    • Align Debezium UI to Debezium 2.3 DBZ-6406

    • Fix CORS error in UI due to Quarkus 3 upgrade DBZ-6422

    • Improve debezium-storage CI build step DBZ-6443

    • Use debezium-bom versions for shared dependencies in Debezium UI DBZ-6453

    \ No newline at end of file
    + Release Notes for Debezium 2.3

    Release Notes for Debezium 2.3

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.3.7.Final (January 21st 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.7.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.7.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.7.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    Other changes

    There are no other changes in this release.

    Release 2.3.6.Final (January 12th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.6.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.6.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.6.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Log sequence check should treat each redo thread independently DBZ-7158

    Other changes

    There are no other changes in this release.

    Release 2.3.5.Final (December 15th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Remove spaces from Signal and Notification MBean’s ObjectName DBZ-6957

    • ParsingException (MySQL/MariaDB): User specification with whitespace DBZ-6978

    • RecordsStreamProducerIT#shouldReceiveChangesForInfinityNumericWithInfinity fails on Postgres < 14 DBZ-6986

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • [DBZ-6517] [HF] CloudEventsConverter throws static error on Kafka Connect 3.5+ DBZ-7263

    Other changes

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    Release 2.3.4.Final (September 21st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Configurable order of user defined and internal aggregation pipeline DBZ-6872

    Fixes

    • MongoDB New Document State Extraction: original name overriding does not work DBZ-6773

    • errors.max.retries = 0 Causes retrievable error to be ignored DBZ-6866

    • Oracle alter table drop constraint fails when cascading index DBZ-6876

    • ExtractNewRecordState’s schema cache is not updated with arrival of the ddl change event DBZ-6901

    • Clean log printout in Redis Debezium Sink DBZ-6908

    • MySql connector get NPE when snapshot.mode is set to never and signal data collection configured DBZ-6937

    • Sanity check / retry for redo logs does not work per Oracle RAC thread DBZ-6938

    Other changes

    • Increase Oracle log level to DEBUG for several key important log messages DBZ-6880

    • Document cursor pipeline ordering and oversize document handling mode DBZ-6883

    Release 2.3.3.Final (September 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    Oracle SCN metrics were exposed as strings. This could prevent certain tools, such as the JMX exporter, from scraping them. Debezium now exposes them as numeric BigInteger values (DBZ-6798).

    New features

    • Debezium 2.3.0.Final Missing Kafka Channel Documentation DBZ-6688

    • Make partial and multi-response transactions debug level logs DBZ-6830

    Fixes

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • ExtractNewDocumentState for MongoDB ignore previous document state when handling delete event’s with REWRITE DBZ-6725

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Dbz crashes on DDL statement (non Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    Other changes

    • Missing or misspelled IDs result in downstream build errors DBZ-6754

    Release 2.3.2.Final (August 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The Oracle connector used 2000 as the default value for the LogMiner query fetch size. Based on community feedback, the default was changed to 10000, as it provides significantly better performance without any downsides (DBZ-6729).

    New features

    There are no new features in this release.

    Fixes

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Oracle fails to process a DROP USER DBZ-6716

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • SQL Server fail to start due to duplicate definition of query.fetch.size DBZ-6743

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    Other changes

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Upstream documentation missing types for configurations DBZ-6707

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    Release 2.3.1.Final (July 27th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Refactor errors.max.retries to common connector framework DBZ-6573

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Introduce internal config option to control how close to CURRENT_SCN Oracle may mine DBZ-6660

    Fixes

    • Batches with DELETE statement first will skip everything else DBZ-6576

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Cannot delete non-null interval value DBZ-6648

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    Other changes

    • Refactor retry handling in Redis schema history DBZ-6594

    • NotificationIT with Oracle xstream fails randomly DBZ-6672

    • Flaky Oracle test: shouldCaptureChangesForTransactionsAcrossSnapshotBoundaryWithoutReemittingDDLChanges DBZ-6673

    Release 2.3.0.Final (June 20th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for custom SourceInfoStructMaker for adding new fields to source field DBZ-6076

    • Connector can potentially read a lot of sync topic messages on startup DBZ-6308

    • Allow to specify separate SID for rac.nodes settings DBZ-6359

    • Periodically clean up SGA using new LogMiner connection DBZ-6499

    • Upgrade debezium-connector-mysql tests to use MySQL 8 DBZ-6534

    • Remove duplicate partitions in TaskSyncContext. DBZ-6544

    • Support exactly-once semantic for streaming phase from Postgres connector DBZ-6547

    • Monitoring failed Incremental Snapshots DBZ-6552

    Fixes

    • Upgrade to Infinispan 14.0.11.Final to fix CVE-2022-45047 DBZ-6193

    • Date and Time values without timezones are not persisted correctly based on database.time_zone DBZ-6399

    • "Ignoring invalid task provided offset" DBZ-6463

    • Oracle snapshot.include.collection.list should be prefixed with databaseName in documentation. DBZ-6474

    • Allow schema to be specified in the Debezium Sink Connector configuration DBZ-6491

    • Error value of negative seconds in convertOracleIntervalDaySecond DBZ-6513

    • Parsing a MySQL table name that ends with a backslash fails DBZ-6519

    • Oracle Connector: Snapshot fails with specific combination DBZ-6528

    • Table order is incorrect on snapshots DBZ-6533

    • Unhandled NullPointerException in PartitionRouting will crash the whole connect plugin DBZ-6543

    • Incorrect image name in postgres example of the operator repo DBZ-6548

    • Examples are not updated with correct image tags for releases DBZ-6549

    • SQL grammar exception on MySQL ALTER statements with multiple columns DBZ-6554

    • debezium/connect image for 2.2.1.Final is not available on dockerhub or quay.io DBZ-6558

    • Bug in field.name.adjustment.mode Property DBZ-6559

    • Operator sets incorrect value of transformation.predicate when no predicate is specified DBZ-6560

    • Kubernetes-Config extension interferes with SSL tests due to k8 devservice starting up DBZ-6574

    • MySQL read-only connector with Kafka signals enabled fails on start up DBZ-6579

    • Redis schema history can fail upon startup DBZ-6580

    Other changes

    • Use "debezium/kafka" container for Debezium UI tests instead of "confluentinc/cp-kafka" DBZ-6449

    • Include debezium operator in image build pipeline DBZ-6546

    • Update repository list in contributor list and missing commit workflows DBZ-6556

    • Upgrade MySQL JDBC driver to 8.0.33 DBZ-6563

    • Upgrade Google Cloud BOM to 26.17.0 DBZ-6570

    Release 2.3.0.CR1 (June 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Code Improvements for skip.messages.without.change DBZ-6366

    • Allow sending signals and receiving notifications via JMX DBZ-6424

    • MySql in debezium-parser-ddl does not support TABLE statement parsing DBZ-6435

    • Utilize event.processing.failure.handling.mode in Vitess replication connection DBZ-6510

    • Only use error processing mode on certain errors DBZ-6523

    • Use better hashing function for PartitionRouting DBZ-6529

    • Create PoC of Debezium Server Operator DBZ-6493

    Fixes

    • Create OCP cluster provisioning jobs DBZ-3129

    • io.debezium.text.ParsingException: DDL statement couldn’t be parsed. Please open a Jira issue with the statement DBZ-6507

    • Oracle Connector failed parsing DDL Statement DBZ-6508

    • FileSignalChannel is not loaded DBZ-6509

    • MySqlReadOnlyIncrementalSnapshotChangeEventSource enforces Kafka dependency during initialization DBZ-6511

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6512

    • Debezium incremental snapshot chunk size documentation unclear or incorrect DBZ-6515

    • [PostgreSQL] LTree data is not being captured by streaming DBZ-6524

    • MySQL "national" keyword is not accepted as column name DBZ-6537

    Other changes

    • Test Debezium on RED HAT OPENSHIFT DATABASE ACCESS - MongoDB Atlas DBZ-5231

    • Add docs on how to extend channels and notification DBZ-6408

    • Create Cron trigger for system tests DBZ-6423

    • Debezium UI Repo dependency update DBZ-6473

    • Add Debezium Server nightly images DBZ-6536

    • Include debezium operator in release scripts DBZ-6539

    • Start publishing nightly images for Debezium Operator DBZ-6541

    • Start releasing images for Debezium Operator DBZ-6542

    Release 2.3.0.Beta1 (May 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    JDBC storage used UTF-16 encoding by default. Most databases default to UTF-8, so JDBC storage has been aligned with them (DBZ-6476).

    New features

    • Testsuite should deploy PostgreSQL with Primary-Secondary streaming replication DBZ-3202

    • PostgreSQL: Set Replica Identity when the connector starts DBZ-6112

    • Correlate incremental snapshot notifications ids with execute signal DBZ-6447

    • [MariaDB] Add support for userstat plugin keywords DBZ-6459

    • Add a header provider string DBZ-6489

    Fixes

    • Debezium Server stops sending events to Google Cloud Pub/Sub DBZ-5175

    • Snapshot step 5 - Reading structure of captured tables time too long DBZ-6439

    • Oracle parallel snapshots do not properly set PDB context when using multitenancy DBZ-6457

    • Debezium Server cannot recover from Google Pub/Sub errors DBZ-6461

    • DDL statement couldn’t be parsed: AUTHENTICATION_POLICY_ADMIN DBZ-6479

    • Db2 connector can fail with NPE on notification sending DBZ-6485

    • BigDecimal fails when queue memory size limit is in place DBZ-6490

    • ORACLE table cannot be captured, got runtime.NoViableAltException DBZ-6492

    • Signal poll interval has incorrect default value DBZ-6496

    • Oracle JDBC driver 23.x throws ORA-18716 - not in any time zone DBZ-6502

    • Alpine postgres images should use llvm/clang 15 explicitly DBZ-6506

    • ExtractNewRecordState SMT in combination with HeaderToValue SMT results in Unexpected field name exception DBZ-6486

    Other changes

    • Verify MongoDB Connector with AWS DocumentDB DBZ-6419

    • Enable set log level in tests DBZ-6460

    • Check OOME on CI tests DBZ-6462

    • Signaling data collection document should refer to source database DBZ-6470

    Release 2.3.0.Alpha1 (May 11th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.4.0 and has been tested with version 3.4.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.3.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.3.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.3.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from DockerHub.

    Breaking changes

    The MySQL connector now uses an SSL connection by default when one is available (DBZ-6340).
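
    If you prefer to keep the previous plain-text behaviour, the SSL mode can be pinned explicitly in the connector configuration. A minimal sketch, assuming the MySQL connector's database.ssl.mode property:

    # Opt out of the new default and connect without SSL
    database.ssl.mode=disabled

    Setting the property to required instead makes the connector fail when the server does not support SSL.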

    New features

    • Enable Debezium to send notifications about its status DBZ-1973

    • Saving Debezium states to JDBC database DBZ-3621

    • Make signalling channel configurable DBZ-4027

    • Edit a connector in Debezium UI DBZ-5313

    • Add connector display name and id to Config endpoint response DBZ-5865

    • Introduce LogMiner query filtering modes DBZ-6254

    • Ensure that the connector can start from a stale timestamp more than one hour into the past DBZ-6307

    • Add JWT authentication to HTTP Client DBZ-6348

    • Monitoring progress of Incremental Snapshots DBZ-6354

    • log.mining.transaction.retention.hours should reference last offset and not sysdate DBZ-6355

    • Support multiple tasks when streaming shard list DBZ-6365

    • Kinesis Sink - AWS Credentials Provider DBZ-6372

    • Fix existing bug in information schema query in the Spanner connector DBZ-6385

    • change logging level of skip.messages.without.change DBZ-6391

    • Debezium UI should ignore unsupported connectors, including unsupported Debezium connectors DBZ-6426

    • Make DELETE sql configurable in JDBC Storage DBZ-6433

    • Include redo/archive log metadata on ORA-01291 exceptions DBZ-6436

    Fixes

    • Back button is not working on the review page UI DBZ-5841

    • Toasted varying character array and date array are not correctly processed DBZ-6122

    • Incorrect dependencies in Debezium Server for Cassandra connector DBZ-6147

    • Lock contention on LOG_MINING_FLUSH table when multiple connectors deployed DBZ-6256

    • Document Requirements for multiple connectors on same db host DBZ-6321

    • The rs_id field is null in Oracle change event source information block DBZ-6329

    • Using pg_replication_slot_advance which is not supported by PostgreSQL 10 DBZ-6353

    • 'CREATE TABLE t (c NATIONAL CHAR)' parsing failed DBZ-6357

    • Toasted hstore are not correctly processed DBZ-6379

    • Snapshotting does not work for hstore in Map mode DBZ-6384

    • Oracle DDL shrink space for table partition can not be parsed DBZ-6386

    • __source_ts_ms r (read) operation date is set to future for SQL Server DBZ-6388

    • Connector cards are misaligned on first step DBZ-6392

    • Debezium Server snapshots are not published DBZ-6395

    • PostgreSQL connector task fails to resume streaming because replication slot is active DBZ-6396

    • MySql in debezium-parser-ddl: The inserted sql statement reports an error DBZ-6401

    • MongoDB connector crashes on invalid resume token DBZ-6402

    • Set (instead of adding) Authorization Headers DBZ-6405

    • New SMT HeaderToValue not working DBZ-6411

    • Debezium Server 2.2.0.Final BOM refers to debezium-build-parent 2.2.0-SNAPSHOT DBZ-6437

    • NPE on read-only MySQL connector start up DBZ-6440

    • Oracle Connector failed parsing DDL Statement DBZ-6442

    • Oracle DDL shrink space for index partition can not be parsed DBZ-6446

    Other changes

    • Verify streaming off of secondary works DBZ-1661

    • Remove the old connector type endpoints from the UI backend DBZ-5604

    • Incremental snapshot completion notifications DBZ-5632

    • Change connector test matrix jobs to pipeline jobs and migrate them to gitlab jenkins DBZ-5861

    • Add Debezium steps when performing a PostgreSQL database upgrade DBZ-6046

    • Test migration from Debezium 1.x to 2.x DBZ-6126

    • Remove OCP 4.8 and 4.9 from 1.x supported configurations page DBZ-6132

    • Remove potentially dangerous JDBC props in MySQL connections DBZ-6157

    • Refactor storage implementations DBZ-6209

    • Align connector field snapshot.mode descriptions as per documentation DBZ-6259

    • Document "incubating" status of incremental snapshot for sharded MongoDB clusters DBZ-6342

    • Run debezium-connector-jdbc build on 'Build Debezium' CI workflow DBZ-6360

    • Migrate Debezium UI MongoDB to MongoDbReplicaSet from core DBZ-6363

    • Base the "replaceable" build numbers in legacy deployment instructions on debezium-build-number attribute DBZ-6371

    • Align Debezium UI to Debezium 2.3 DBZ-6406

    • Fix CORS error in UI due to Quarkus 3 upgrade DBZ-6422

    • Improve debezium-storage CI build step DBZ-6443

    • Use debezium-bom versions for shared dependencies in Debezium UI DBZ-6453

    \ No newline at end of file
    diff --git a/releases/2.4/index.html b/releases/2.4/index.html
    index 124322bd55..48b5fbc20c 100644
    --- a/releases/2.4/index.html
    +++ b/releases/2.4/index.html
    @@ -1 +1 @@
    - Debezium Release Series 2.4

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.33
    MongoDB Database: 4.4, 5.0, 6.0, 7.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.0
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.4.0.CR1

    2023-09-22
    Parallelized incremental snapshot for MongoDB; Spanner connector is more robust in high-rebalance environments; Vitess connector retries most errors by default; Support for PostgreSQL 16; Spanner connector supports GKE identities

    2.4.0.Beta1

    2023-08-29

    2.4.0.Alpha1

    2023-07-14
    Kafka 3.5; Initial snapshot progress notifications; Custom tags for connector metrics; MongoDB connector no longer requires cluster-wide privileges; Debezium UI displays critical metrics; Support for MySQL parallelized schema snapshot; Support for Oracle XML datatypes
    \ No newline at end of file
    + Debezium Release Series 2.4

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 5.7, 8.0.x
    Driver: 8.0.33
    MongoDB Database: 4.4, 5.0, 6.0, 7.0
    Driver: 4.7.1
    PostgreSQL Database: 10, 11, 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.0
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019
    Driver: 10.2.1.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.4.0.CR1

    2023-09-22
    Parallelized incremental snapshot for MongoDB; Spanner connector is more robust in high-rebalance environments; Vitess connector retries most errors by default; Support for PostgreSQL 16; Spanner connector supports GKE identities

    2.4.0.Beta1

    2023-08-29

    2.4.0.Alpha1

    2023-07-14
    Kafka 3.5; Initial snapshot progress notifications; Custom tags for connector metrics; MongoDB connector no longer requires cluster-wide privileges; Debezium UI displays critical metrics; Support for MySQL parallelized schema snapshot; Support for Oracle XML datatypes
    \ No newline at end of file
    diff --git a/releases/2.4/release-notes.html b/releases/2.4/release-notes.html
    index 463d2e51b1..0abe32f7e7 100644
    --- a/releases/2.4/release-notes.html
    +++ b/releases/2.4/release-notes.html
    @@ -1 +1 @@
    - Release Notes for Debezium 2.4

    Release Notes for Debezium 2.4

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.4.2.Final (December 13th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • Broken support for multi-namespace watching DBZ-7254

    Other changes

    There are no other changes in this release.

    Release 2.4.1.Final (November 16th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Provide capability to set image pull secrets in DS k8s CRD DBZ-6962

    • Add displayName and description metadata to DebeziumServer CRD in OLM Bundle DBZ-7011

    • Provide resources to set pod requests and limits in DS k8s CRD DBZ-7052

    • Provide svc to better collects dbz-server metrics in DS k8s DBZ-7053

    Fixes

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Missing events from Oracle 19c DBZ-6963

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • OLM bundle version for GA releases is invalid DBZ-6994

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • DDL statement couldn’t be parsed DBZ-7030

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    Other changes

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • Use optional schema for Timezone Converter tests DBZ-7020

    • Update operator dependencies and add qosdk platform bom DBZ-7048

    • Consolidate resource labels and annotations DBZ-7064

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    Release 2.4.0.Final (October 3rd 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add timestamp to Notification DBZ-6793

    • Documentation for cursor.oversize.skip.threshold is missing units DBZ-6968

    • Add MongoDB Connector support for filtering.match.mode=regex|literal property DBZ-6973

    Fixes

    • Debezium Outbox not working with CloudEventsConverter DBZ-3642

    • Incremental snapshot data-collections are not deduplicated DBZ-6787

    • MongoDB connector no longer requires cluster-wide privileges DBZ-6888

    • Timezone Transformation can’t work DBZ-6940

    • MySQL Kafka Signalling documentation is incorrect DBZ-6941

    • Infinite loop when using OR condition in additional-condition DBZ-6956

    • Filter out specified DDL events logic is reverted DBZ-6966

    • DDL parser does not support NOCOPY keyword DBZ-6971

    • Decrease time spent in handling rebalance events DBZ-6974

    • ParsingException (MySQL/MariaDB): User specification with whitespace DBZ-6978

    • RecordsStreamProducerIT#shouldReceiveChangesForInfinityNumericWithInfinity fails on Postgres < 14 DBZ-6986

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo may fail as the schema may not exists DBZ-6987

    Other changes

    • Add option to use apicurio with TLS to system level testsuite DBZ-6954

    Release 2.4.0.CR1 (September 22nd 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Vitess connector previously retried only a subset of errors by default. This behaviour has changed: errors are now retried by default, and only explicitly defined errors are not retried (DBZ-6944).

    New features

    • Update mongodb incremental snapshot to allow multiple threads reading chunks DBZ-6518

    • Support for GKE workload identities DBZ-6885

    • Support for PostgreSQL 16 DBZ-6911

    • Vitess connector should retry on not found errors DBZ-6939

    Fixes

    • Ad-hoc blocking snaps trigger emits schema changes of all tables DBZ-6828

    • When the start_scn corresponding to the existence of a transaction in V$TRANSACTION is 0, log mining starts from the oldest scn when the oracle connector is started for the first time DBZ-6869

    • Ensure that the connector can handle rebalance events robustly DBZ-6870

    • OpenLogReplicator confirmation can resend or omit events on restarts DBZ-6895

    • ExtractNewRecordState’s schema cache is not updated with arrival of the ddl change event DBZ-6901

    • Misleading Debezium error message when RDI port is not specified in application.properties DBZ-6902

    • Generating protobuf files to target/generated-sources breaks build DBZ-6903

    • Clean log printout in Redis Debezium Sink DBZ-6908

    • Values being omitted from list of JSON object DBZ-6910

    • Fix logger name DBZ-6935

    • MySql connector get NPE when snapshot.mode is set to never and signal data collection configured DBZ-6937

    • Sanity check / retry for redo logs does not work per Oracle RAC thread DBZ-6938

    • Drop events has wrong table changes information DBZ-6945

    • Remove spaces from Signal and Notification MBean’s ObjectName DBZ-6957

    Other changes

    • Migrate all examples from mongodb.hosts to mongodb.connection.string DBZ-6893

    Release 2.4.0.Beta2 (September 13th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Deprecated MongoDB connection configuration via the mongodb.hosts and mongodb.members.autodiscover options was removed. Only configuration via connection string is now supported (DBZ-6892).
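
    A minimal sketch of the migration, with placeholder host names, credentials, and replica set name:

    # Before (removed in this release)
    mongodb.hosts=rs0/mongo1:27017,mongo2:27017

    # After: a single MongoDB connection string
    mongodb.connection.string=mongodb://debezium:secret@mongo1:27017,mongo2:27017/?replicaSet=rs0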

    New features

    • Ingest changes via OpenLogReplicator DBZ-2543

    • Only publish deltas instead of full snapshots to reduce size of sync event messages DBZ-6458

    • SMT for handling timezone conversions DBZ-6567

    • Support custom authentication on MongoDB connector DBZ-6741

    • Document mongodb.authentication.class DBZ-6788

    • Support truncating large columns DBZ-6844

    • Always reset VStream grpc channel when max size is exceeded DBZ-6852

    • Add an overview page for Connector detail DBZ-6856

    • Avoid getting NPE when executing the arrived method in ExecuteSnapshot DBZ-6865

    • Configurable order of user defined and internal aggregation pipeline DBZ-6872

    • Add support for MongoDB 7 DBZ-6882

    Fixes

    • Documentation content section on debezium.io scrolls over to the top header DBZ-5942

    • Postgres - Incremental snapshot fails on tables with an enum type in the primary key DBZ-6481

    • schema.history.internal.store.only.captured.databases.ddl flag not considered while snapshot schema to history topic DBZ-6712

    • ExtractNewDocumentState for MongoDB ignores previous document state when handling delete events with REWRITE DBZ-6725

    • MongoDB New Document State Extraction: original name overriding does not work DBZ-6773

    • Error with propagation source column name DBZ-6831

    • Kafka offset store fails with NPE DBZ-6853

    • JDBC Offset storage - configuration of table name does not work DBZ-6855

    • JDBC sink insert fails with Oracle target database due to semicolon DBZ-6857

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Tombstone events causes NPE on JDBC connector DBZ-6862

    • Debezium-MySQL not filtering AWS RDS internal events DBZ-6864

    • errors.max.retries = 0 Causes retrievable error to be ignored DBZ-6866

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    • ChangeStream aggregation pipeline fails on large documents which should be excluded DBZ-6871

    • Oracle alter table drop constraint fails when cascading index DBZ-6876

    Other changes

    • Docs for Timezone SMT DBZ-6835

    • Write a blog post for custom properties step in DBZ UI DBZ-6838

    • Improve website/documentation artifact links DBZ-6850

    • Add possibility to add on-demand adjusted testing farm execution DBZ-6854

    • Oracle connector test suite logging no longer works DBZ-6859

    • Increase Oracle log level to DEBUG for several key important log messages DBZ-6880

    • Document cursor pipeline ordering and oversize document handling mode DBZ-6883

    Release 2.4.0.Beta1 (August 29th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Oracle SCN metrics were exposed as strings. This could prevent certain tools, such as the JMX exporter, from scraping them. Debezium now exposes them as numeric BigInteger values (DBZ-6798).

    New features

    • Provide by DDL type schema event filtering DBZ-6240

    • Add support for TimescaleDB DBZ-6482

    • Max transaction duration for Oracle connector DBZ-6615

    • Debezium 2.3.0.Final Missing Kafka Channel Documentation DBZ-6688

    • Make the Kafka channel consumer group ID configurable for the PostgreSQL connector DBZ-6689

    • Use JSON format for JMX Notification userData DBZ-6742

    • Use custom RowDeserializers in case of binlog compression DBZ-6786

    • Create a shardless topic naming strategy for vitess connector DBZ-6800

    • JDBC sink does not support SQL Server identity inserts DBZ-6801

    • Allow the embedded infinispan global configuration to be configurable DBZ-6808

    • SqlServer connector send heartbeats when there is no change in the DB DBZ-6811

    • Make finished partition deletion delay configurable. DBZ-6814

    • Add vcs.xml for idea DBZ-6825

    • Make partial and multi-response transactions debug level logs DBZ-6830

    Fixes

    • Debezium heartbeat.action.query does not start before writing to WAL. DBZ-6635

    • Schema name changed with Custom topic naming strategy DBZ-6641

    • Wrong behavior of quote.identifiers in JdbcSinkConnector DBZ-6682

    • Toasted UUID array is not properly processed DBZ-6720

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Mongodb connector tests are massively failing when executed on 7.0-rc version DBZ-6779

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Mysql connector tests are failing when executed without any profile DBZ-6791

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Fix bug with getSnapshottingTask DBZ-6820

    • Dbz crashes on DDL statement (non Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo fails randomly DBZ-6839

    • Wrong filtered comments DBZ-6840

    • Intermittent test failure: BaseSourceTaskTest.verifyTaskRestartsSuccessfully DBZ-6841

    Other changes

    • Upstream documentation connector config is not unified DBZ-6704

    • Blocking snapshot must take snapshot configurations from signal DBZ-6731

    • Documentation Request - Property File Configuration - Off-Heap Event Buffering with Embedded Infinispan DBZ-6813

    • Onboard testing farm DBZ-6827

    • When using skip.messages.without.change=true a WARN log message is reported for each record DBZ-6843

    Release 2.4.0.Alpha2 (August 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Debezium Server package provided only the Cassandra 4 connector. All connectors are now provided, but an environment variable must be configured to select the one to use (DBZ-6638).

    MySQL did not set precision for BIGINT in precise mode. The precision is now set, which can lead to a schema incompatibility (DBZ-6714).

    The default value for the Oracle LogMiner query fetch size was increased to 10000 (DBZ-6729).

    Vitess connector mapped _bin collated string columns to binary data. The columns are now correctly mapped to strings (DBZ-6748).

    Vitess connector previously applied schema changes across all shards. The schema changes are now correctly applied for each shard individually (DBZ-6775). If you are using an io.debezium.schema.DefaultTopicNamingStrategy (or a derivative), you should switch to io.debezium.connector.vitess.TableTopicNamingStrategy to preserve the same topics. The DefaultTopicNamingStrategy now routes messages to topics based on which shard the change occurred in.
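
    A minimal configuration sketch of that switch, assuming the connector's topic.naming.strategy property:

    # Keep the previous per-table topic layout for Vitess
    topic.naming.strategy=io.debezium.connector.vitess.TableTopicNamingStrategy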

    New features

    • Switch tracing to OpenTelemetry DBZ-2862

    • Update the Edit connector UI to incorporate the feedback received from team in demo DBZ-6514

    • Support blocking ad-hoc snapshots DBZ-6566

    • Add new parameters to RabbitMQ consumer DBZ-6581

    • Document read preference changes in 2.4 DBZ-6591

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Send tombstone events when partition queries are finished DBZ-6658

    • Propagate source column name and allow sink to use it DBZ-6684

    • Disable jdk-outreach-workflow.yml in forked personal repo DBZ-6702

    • Support alternative JDBC drivers in MySQL connector DBZ-6727

    • Add STOPPED and RESTARTING connector states to testing library DBZ-6734

    • Add a new parameter for selecting the db index when using Redis Storage DBZ-6759

    • Table schemas should be updated for each shard individually DBZ-6775

    Fixes

    • Connector drop down causes a scroll bar DBZ-5421

    • Provide outline for drawer component showing connector details DBZ-5831

    • Modify scroll for the running connector component DBZ-5832

    • Connector restart regression DBZ-6213

    • Document Optimal MongoDB Oplog Config for Resiliency DBZ-6455

    • JDBC Schema History: When the table name is passed as dbName.tableName, the connector does not start DBZ-6484

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Received an unexpected message type that does not have an 'after' Debezium block DBZ-6637

    • When the Debezium MongoDB connector encounters authentication or under-privilege errors, the connection between Debezium and MongoDB keeps going up. DBZ-6643

    • Snapshot will not capture data when signal.data.collection is present without table.include.list DBZ-6669

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • Partition duplication after rebalances with single leader task DBZ-6685

    • JDBC Sink Connector Fails on Loading Flat Data Containing Struct Type Fields from Kafka DBZ-6686

    • SQLSyntaxErrorException using Debezium JDBC Sink connector DBZ-6687

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • CDC - Debezium x RabbitMQ - Debezium Server crashes when an UPDATE/DELETE on source database (PostgreSQL) DBZ-6691

    • Missing operationTime field on ping command when executed against Atlas DBZ-6700

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    • Refactor EmbeddedEngine::run method DBZ-6715

    • Oracle fails to process a DROP USER DBZ-6716

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Oracle XML column types are not properly resolved when adding XMLTYPE column during streaming DBZ-6782

    Other changes

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Blogpost about custom signalling/notification channels DBZ-6478

    • NotificationIT with Oracle xstream fails randomly DBZ-6672

    • Flaky Oracle test: shouldCaptureChangesForTransactionsAcrossSnapshotBoundaryWithoutReemittingDDLChanges DBZ-6673

    • Update documentation on XML and RAW data types DBZ-6676

    • Use descriptive text instead of ‘-1’ in ‘Time since last event’ for no event case DBZ-6681

    • MongoDB upstream documentation duplication DBZ-6705

    • Upstream documentation missing types for configurations DBZ-6707

    • Exit test suite consumption loop when connector has stopped DBZ-6730

    • Update Quarkus to 3.2.3.Final DBZ-6740

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    • SingleProcessor remove redundant filter logic DBZ-6745

    • OracleSchemaMigrationIT fails after adding RAW data type support DBZ-6751

    • Missing or misspelled IDs result in downstream build errors DBZ-6754

    • Bump the MySQL binlog client version to 0.28.1 which includes significant GTID event performance improvements DBZ-6783

    • Add new Redis Sink connector parameter description to the documentation DBZ-6784

    • Upgrade Kafka to 3.5.1 DBZ-6785

    Release 2.4.0.Alpha1 (July 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The MongoDB connector internally used a hard-coded primary-preferred behaviour in certain scenarios. This is no longer the case and the connection string setting is used (DBZ-6521).
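
    Read preference is therefore now expressed in the connection string itself. A minimal sketch, with placeholder hosts and replica set name:

    mongodb.connection.string=mongodb://mongo1:27017,mongo2:27017/?replicaSet=rs0&readPreference=secondaryPreferred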

    Vitess connector source info now contains the shard that was the source of the event (DBZ-6617).

    New features

    • Capture & display critical connector metrics for Debezium UI DBZ-5321

    • Don’t require cluster-wide privileges when watching a single database/collection DBZ-6182

    • Debezium Offset-Editor example application DBZ-6338

    • Notify about initial snapshot progress DBZ-6416

    • Set Readpreference tags in the MongoDB client DBZ-6468

    • MySqlSnapshotChangeEventSource parallel execute createSchemaEventsForTables DBZ-6472

    • Refactor errors.max.retries to common connector framework DBZ-6573

    • Explain failure on existing publication update when switching to filtered from all_tables DBZ-6577

    • Debezium should honor read preference from connection string DBZ-6578

    • Document support for database restricted privileges for 2.4 DBZ-6592

    • Use source field in topic in table.format.name DBZ-6595

    • Support authentication with TC MongoDB deployments DBZ-6596

    • Support for getting primary key from header DBZ-6602

    • Support for custom tags in the connector metrics DBZ-6603

    • Update docs for new shard field DBZ-6627

    • Improve JDBC connector documentation DBZ-6632

    • Add configurable timeout to initialization procedure DBZ-6653

    • Introduce internal config option to control how close to CURRENT_SCN Oracle may mine DBZ-6660

    • Add support for XML_TYPE column type to Debezium connector for Oracle DBZ-3605

    Fixes

    • Mysql connector fails to parse statement FLUSH FIREWALL_RULES DBZ-3925

    • Snapshot result not saved if LAST record is filtered out DBZ-5464

    • CloudEventsConverter throws static error on Kafka Connect 3.5+ DBZ-6517

    • Dependency io.debezium:debezium-testing-testcontainers affects logback in tests DBZ-6525

    • Batches with DELETE statement first will skip everything else DBZ-6576

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • Build of Postgres connector fails when building against Kafka 2.X DBZ-6614

    • Upgrade postgresql driver to v42.6.0 DBZ-6619

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • Cannot delete non-null interval value DBZ-6648

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • CloudEvents converter is broken for JSON message deserialization DBZ-6654

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Junit conflicts cause by test-containers module using transitive Junit5 from quarkus DBZ-6659

    Other changes

    • Add the API endpoint to expose running connector metrics DBZ-5359

    • Display critical connector metrics DBZ-5360

    • Define and document schema history topic messages schema DBZ-5518

    • Align query.fetch.size across connectors DBZ-5676

    • Upgrade to Apache Kafka 3.5.0 DBZ-6047

    • Remove downstream related code from UI Frontend code DBZ-6394

    • Make Signal actions extensible DBZ-6417

    • Cleanup duplicit jobs from jenkins DBZ-6535

    • Implement sharded mongo ocp deployment and integration tests DBZ-6538

    • Refactor retry handling in Redis schema history DBZ-6594

    • Upgrade Quarkus to 3.2.0.Final DBZ-6626

    • Upgrade kcctl to 1.0.0.Beta3 DBZ-6642

    • Upgrade gRPC to 1.56.1 DBZ-6649

    • Disable Kafka 2.x CRON trigger DBZ-6667

    \ No newline at end of file
    + Release Notes for Debezium 2.4

    Release Notes for Debezium 2.4

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.4.2.Final (December 13th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • Broken support for multi-namespace watching DBZ-7254

    Other changes

    There are no other changes in this release.

    Release 2.4.1.Final (November 16th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Provide capability to set image pull secrets in DS k8s CRD DBZ-6962

    • Add displayName and description metadata to DebeziumServer CRD in OLM Bundle DBZ-7011

    • Provide resources to set pod requests and limits in DS k8s CRD DBZ-7052

    • Provide svc to better collects dbz-server metrics in DS k8s DBZ-7053

    Fixes

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Missing events from Oracle 19c DBZ-6963

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • OLM bundle version for GA releases is invalid DBZ-6994

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • DDL statement couldn’t be parsed DBZ-7030

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    Other changes

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • Use optional schema for Timezone Converter tests DBZ-7020

    • Update operator dependencies and add qosdk platform bom DBZ-7048

    • Consolidate resource labels and annotations DBZ-7064

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    Release 2.4.0.Final (October 3rd 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add timestamp to Notification DBZ-6793

    • Documentation for cursor.oversize.skip.threshold is missing units DBZ-6968

    • Add MongoDB Connector support for filtering.match.mode=regex|literal property DBZ-6973

    Fixes

    • Debezium Outbox not working with CloudEventsConverter DBZ-3642

    • Incremental snapshot data-collections are not deduplicated DBZ-6787

    • MongoDB connector no longer requires cluster-wide privileges DBZ-6888

    • Timezone Transformation can’t work DBZ-6940

    • MySQL Kafka Signalling documentation is incorrect DBZ-6941

    • Infinite loop when using OR condition in additional-condition DBZ-6956

    • Filter out specified DDL events logic is reverted DBZ-6966

    • DDL parser does not support NOCOPY keyword DBZ-6971

    • Decrease time spent in handling rebalance events DBZ-6974

    • ParsingException (MySQL/MariaDB): User specification with whitespace DBZ-6978

    • RecordsStreamProducerIT#shouldReceiveChangesForInfinityNumericWithInfinity fails on Postgres < 14 DBZ-6986

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo may fail as the schema may not exists DBZ-6987

    Other changes

    • Add option to use apicurio with TLS to system level testsuite DBZ-6954

    Release 2.4.0.CR1 (September 22nd 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Vitess connector previously retried only a subset of errors by default. This behaviour has changed: all errors are now retried except those explicitly defined as non-retriable (DBZ-6944).

    New features

    • Update mongodb incremental snapshot to allow multiple threads reading chunks DBZ-6518

    • Support for GKE workload identities DBZ-6885

    • Support for PostgreSQL 16 DBZ-6911

    • Vitess connector should retry on not found errors DBZ-6939

    Fixes

    • Ad-hoc blocking snaps trigger emits schema changes of all tables DBZ-6828

    • When the start_scn corresponding to the existence of a transaction in V$TRANSACTION is 0, log mining starts from the oldest scn when the oracle connector is started for the first time DBZ-6869

    • Ensure that the connector can handle rebalance events robustly DBZ-6870

    • OpenLogReplicator confirmation can resend or omit events on restarts DBZ-6895

    • ExtractNewRecordState’s schema cache is not updated with arrival of the ddl change event DBZ-6901

    • Misleading Debezium error message when RDI port is not specified in application.properties DBZ-6902

    • Generating protobuf files to target/generated-sources breaks build DBZ-6903

    • Clean log printout in Redis Debezium Sink DBZ-6908

    • Values being omitted from list of JSON object DBZ-6910

    • fix logger named DBZ-6935

    • MySql connector get NPE when snapshot.mode is set to never and signal data collection configured DBZ-6937

    • Sanity check / retry for redo logs does not work per Oracle RAC thread DBZ-6938

    • Drop events has wrong table changes information DBZ-6945

    • Remove spaces from Signal and Notification MBean’s ObjectName DBZ-6957

    Other changes

    • Migrate all examples from mongodb.hosts to mongodb.connection.string DBZ-6893

    Release 2.4.0.Beta2 (September 13th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The deprecated MongoDB connection configuration via the mongodb.hosts and mongodb.members.autodiscover options has been removed. Only configuration via a connection string is now supported (DBZ-6892).
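
    As a hedged illustration of connection-string-only configuration, the fragment below registers a MongoDB connector whose connection details live entirely in mongodb.connection.string; the host names, connector name, and include list are placeholders, and a Connect worker at http://localhost:8083 is assumed.

        import requests  # third-party HTTP client

        # Hypothetical connector registration; only mongodb.connection.string is used,
        # the removed mongodb.hosts / mongodb.members.autodiscover options no longer apply.
        config = {
            "name": "mongo-inventory",  # placeholder connector name
            "config": {
                "connector.class": "io.debezium.connector.mongodb.MongoDbConnector",
                "mongodb.connection.string": "mongodb://mongo1:27017,mongo2:27017/?replicaSet=rs0",
                "topic.prefix": "inventory",
                "collection.include.list": "inventory.orders",
            },
        }

        requests.post("http://localhost:8083/connectors", json=config).raise_for_status()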

    New features

    • Ingest changes via OpenLogReplicator DBZ-2543

    • Only publish deltas instead of full snapshots to reduce size of sync event messages DBZ-6458

    • SMT for handling timezone conversions DBZ-6567

    • Support custom authentication on MongoDB connector DBZ-6741

    • Document mongodb.authentication.class DBZ-6788

    • Support truncating large columns DBZ-6844

    • Always reset VStream grpc channel when max size is exceeded DBZ-6852

    • Add an overview page for Connector detail DBZ-6856

    • Avoid getting NPE when executing the arrived method in ExecuteSnapshot DBZ-6865

    • Configurable order of user defined and internal aggregation pipeline DBZ-6872

    • Add support for MongoDB 7 DBZ-6882

    Fixes

    • Documentation content section in the debezium.io scroll over to the top header. DBZ-5942

    • Postgres - Incremental snapshot fails on tables with an enum type in the primary key DBZ-6481

    • schema.history.internal.store.only.captured.databases.ddl flag not considered while snapshot schema to history topic DBZ-6712

    • ExtractNewDocumentState for MongoDB ignore previous document state when handling delete event’s with REWRITE DBZ-6725

    • MongoDB New Document State Extraction: original name overriding does not work DBZ-6773

    • Error with propagation source column name DBZ-6831

    • Kafka offset store fails with NPE DBZ-6853

    • JDBC Offset storage - configuration of table name does not work DBZ-6855

    • JDBC sink insert fails with Oracle target database due to semicolon DBZ-6857

    • Oracle test shouldContinueToUpdateOffsetsEvenWhenTableIsNotChanged fails with NPE DBZ-6860

    • Tombstone events causes NPE on JDBC connector DBZ-6862

    • Debezium-MySQL not filtering AWS RDS internal events DBZ-6864

    • errors.max.retries = 0 Causes retrievable error to be ignored DBZ-6866

    • Streaming aggregation pipeline broken for combination of database filter and signal collection DBZ-6867

    • ChangeStream aggregation pipeline fails on large documents which should be excluded DBZ-6871

    • Oracle alter table drop constraint fails when cascading index DBZ-6876

    Other changes

    • Docs for Timezone SMT DBZ-6835

    • Write a blog post for custom properties step in DBZ UI DBZ-6838

    • Improve website/documentation artifact links DBZ-6850

    • Add possibility to add on-demand adjusted testing farm execution DBZ-6854

    • Oracle connector test suite logging no longer works DBZ-6859

    • Increase Oracle log level to DEBUG for several key important log messages DBZ-6880

    • Document cursor pipeline ordering and oversize document handling mode DBZ-6883

    Release 2.4.0.Beta1 (August 29th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Oracle SCN metrics were exposed as strings, which could prevent certain tools such as the JMX exporter from scraping them. Debezium now exposes them as numeric BigInteger values (DBZ-6798).

    New features

    • Provide by DDL type schema event filtering DBZ-6240

    • Add support for TimescaleDB DBZ-6482

    • Max transaction duration for Oracle connector DBZ-6615

    • Debezium 2.3.0.Final Missing Kafka Channel Documentation DBZ-6688

    • Make the Kafka channel consumer group ID configurable for the PostgreSQL connector DBZ-6689

    • Use JSON format for JMX Notification userData DBZ-6742

    • Use custom RowDeserializers in case of binlog compression DBZ-6786

    • Create a shardless topic naming strategy for vitess connector DBZ-6800

    • JDBC sink does not support SQL Server identity inserts DBZ-6801

    • Allow the embedded infinispan global configuration to be configurable DBZ-6808

    • SqlServer connector send heartbeats when there is no change in the DB DBZ-6811

    • Make finished partition deletion delay configurable. DBZ-6814

    • Add vcs.xml for idea DBZ-6825

    • Make partial and multi-response transactions debug level logs DBZ-6830

    Fixes

    • Debezium heartbeat.action.query does not start before writing to WAL. DBZ-6635

    • Schema name changed with Custom topic naming strategy DBZ-6641

    • Wrong behavior of quote.identifiers in JdbcSinkConnector DBZ-6682

    • Toasted UUID array is not properly processed DBZ-6720

    • Debezium crashes on parsing MySQL DDL statement (specific JOIN) DBZ-6724

    • When using pgoutput in postgres connector, (+/-)Infinity is not supported in decimal values DBZ-6758

    • Outbox transformation can cause connector to crash DBZ-6760

    • MongoDB New Document State Extraction: nonexistent field for add.headers DBZ-6774

    • Mongodb connector tests are massively failing when executed on 7.0-rc version DBZ-6779

    • Dbz crashes on parsing MySQL DDL statement (SELECT 1.;) DBZ-6780

    • Mysql connector tests are failing when executed without any profile DBZ-6791

    • Dbz crashed on parsing MySQL DDL statement (SELECT 1 + @sum:=1 AS ss;) DBZ-6794

    • MySQL DDL parser - REPEAT function not accepted DBZ-6803

    • Fix bug with getsnapshottingtask DBZ-6820

    • Dbz crashes on DDL statement (non Latin chars in variables) DBZ-6821

    • Not trim the default value for the BIGINT and SMALLINT types when parsing MySQL DDL DBZ-6824

    • PostgresConnectorIT#shouldAddNewFieldToSourceInfo fails randomly DBZ-6839

    • Wrong filtered comments DBZ-6840

    • Intermittent test failure: BaseSourceTaskTest.verifyTaskRestartsSuccessfully DBZ-6841

    Other changes

    • Upstream documentation connector config is not unified DBZ-6704

    • Blocking snapshot must take snapshot configurations from signal DBZ-6731

    • Documentation Request - Property File Configuration - Off-Heap Event Buffering with Embedded Infinispan DBZ-6813

    • Onboard testing farm DBZ-6827

    • When using skip.messages.without.change=true a WARN log message is reported for each record DBZ-6843

    Release 2.4.0.Alpha2 (August 9th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.1 and has been tested with version 3.5.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Debezium Server package provided only the Cassandra 4 connector. All connectors are now provided, but an environment variable must be configured to select the one to use (DBZ-6638).

    MySQL did not set a precision for BIGINT in precise mode. The precision is now set, which can lead to a schema incompatibility (DBZ-6714).

    The default value for the Oracle LogMiner query fetch size was increased to 10,000 (DBZ-6729).

    Vitess connector mapped _bin collated string columns to binary data. The columns are now correctly mapped to strings (DBZ-6748).

    The Vitess connector previously applied schema changes across all shards. Schema changes are now correctly applied to each shard individually (DBZ-6775). If you are using io.debezium.schema.DefaultTopicNamingStrategy (or a derivative), you should switch to io.debezium.connector.vitess.TableTopicNamingStrategy to preserve the same topics; the DefaultTopicNamingStrategy now routes messages to topics based on the shard in which the change occurred.
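
    A hedged sketch of the corresponding configuration fragment follows; topic.naming.strategy and topic.prefix are standard Debezium options, the class names are taken from the note above, and the remaining values are placeholders.

        # Hypothetical fragment of a Vitess connector configuration that keeps the
        # pre-2.4 per-table topic names after the per-shard schema change fix.
        vitess_config = {
            "connector.class": "io.debezium.connector.vitess.VitessConnector",
            "topic.prefix": "vitess",  # placeholder
            # Preserve the previous topic layout instead of the shard-aware default:
            "topic.naming.strategy": "io.debezium.connector.vitess.TableTopicNamingStrategy",
        }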

    New features

    • Switch tracing to OpenTelemetry DBZ-2862

    • Update the Edit connector UI to incorporate the feedback received from team in demo DBZ-6514

    • Support blocking ad-hoc snapshots DBZ-6566

    • Add new parameters to RabbitMQ consumer DBZ-6581

    • Document read preference changes in 2.4 DBZ-6591

    • Log appropriate error when JDBC connector receive SchemaChange record DBZ-6655

    • Send tombstone events when partition queries are finished DBZ-6658

    • Propagate source column name and allow sink to use it DBZ-6684

    • Disable jdk-outreach-workflow.yml in forked personal repo DBZ-6702

    • Support alternative JDBC drivers in MySQL connector DBZ-6727

    • Add STOPPED and RESTARTING connector states to testing library DBZ-6734

    • Add a new parameter for selecting the db index when using Redis Storage DBZ-6759

    • Table schemas should be updated for each shard individually DBZ-6775

    Fixes

    • Connector drop down causes a scroll bar DBZ-5421

    • Provide outline for drawer component showing connector details DBZ-5831

    • Modify scroll for the running connector component DBZ-5832

    • Connector restart regression DBZ-6213

    • Document Optimal MongoDB Oplog Config for Resiliency DBZ-6455

    • JDBC Schema History: When the table name is passed as dbName.tableName, the connector does not start DBZ-6484

    • Oracle DDL parser does not properly detect end of statement when comments obfuscate the semicolon DBZ-6599

    • Received an unexpected message type that does not have an 'after' Debezium block DBZ-6637

    • When Debezium Mongodb connector encounter authentication or under privilege errors, the connection between debezium and mongodb keeps going up. DBZ-6643

    • Snapshot will not capture data when signal.data.collection is present without table.include.list DBZ-6669

    • Retriable operations are retried infinitely since error handlers are not reused DBZ-6670

    • Oracle DDL parser does not support column visibility on ALTER TABLE DBZ-6677

    • Partition duplication after rebalances with single leader task DBZ-6685

    • JDBC Sink Connector Fails on Loading Flat Data Containing Struct Type Fields from Kafka DBZ-6686

    • SQLSyntaxErrorException using Debezium JDBC Sink connector DBZ-6687

    • Should use topic.prefix rather than connector.server.name in MBean namings DBZ-6690

    • CDC - Debezium x RabbitMQ - Debezium Server crashes when an UPDATE/DELETE on source database (PostgreSQL) DBZ-6691

    • Missing operationTime field on ping command when executed against Atlas DBZ-6700

    • MongoDB SRV protocol not working in Debezium Server DBZ-6701

    • Custom properties step not working correctly in validation of the properties added by user DBZ-6711

    • Add tzdata-java to UI installation Dockerfile DBZ-6713

    • Refactor EmbeddedEngine::run method DBZ-6715

    • Oracle fails to process a DROP USER DBZ-6716

    • Oracle LogMiner mining distance calculation should be skipped when upper bounds is not within distance DBZ-6733

    • MariaDB: Unparseable DDL statement (ALTER TABLE IF EXISTS) DBZ-6736

    • MySQL dialect does not properly recognize non-default value longblob types due to typo DBZ-6753

    • Postgres tests for toasted byte array and toasted date array fail with decoderbufs plugin DBZ-6767

    • Notifications and signals leaks between MBean instances when using JMX channels DBZ-6777

    • Oracle XML column types are not properly resolved when adding XMLTYPE column during streaming DBZ-6782

    Other changes

    • Highlight information about how to configure the schema history topic to store data only for intended tables DBZ-6219

    • Blogpost about custom signalling/notification channels DBZ-6478

    • NotificationIT with Oracle xstream fails randomly DBZ-6672

    • Flaky Oracle test: shouldCaptureChangesForTransactionsAcrossSnapshotBoundaryWithoutReemittingDDLChanges DBZ-6673

    • Update documentation on XML and RAW data types DBZ-6676

    • Use descriptive text instead of ‘-1’ in ‘Time since last event’ for no event case DBZ-6681

    • MongoDB upstream documentation duplication DBZ-6705

    • Upstream documentation missing types for configurations DBZ-6707

    • Exit test suite consumption loop when connector has stopped DBZ-6730

    • Update Quarkus to 3.2.3.Final DBZ-6740

    • Decouple Debezium Server and Extension Quarkus versions DBZ-6744

    • SingleProcessor remove redundant filter logic DBZ-6745

    • OracheSchemaMigrationIT fails after adding RAW data type support DBZ-6751

    • Missing or misspelled IDs result in downstream build errors DBZ-6754

    • Bump the MySQL binlog client version to 0.28.1 which includes significant GTID event performance improvements DBZ-6783

    • Add new Redis Sink connector parameter description to the documentation DBZ-6784

    • Upgrade Kafka to 3.5.1 DBZ-6785

    Release 2.4.0.Alpha1 (July 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.5.0 and has been tested with version 3.5.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.4.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.4.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.4.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The MongoDB connector internally used hard-coded primary-preferred behaviour in certain scenarios. This is no longer the case and the connection string setting is used (DBZ-6521).

    Vitess connector source info now contains the shard that was the source of the event (DBZ-6617).

    New features

    • Capture & display critical connector metrics for Debezium UI DBZ-5321

    • Don’t require cluster-wide privileges when watching a single database/collection DBZ-6182

    • Debezium Offset-Editor example application DBZ-6338

    • Notify about initial snapshot progress DBZ-6416

    • Set Readpreference tags in the MongoDB client DBZ-6468

    • MySqlSnapshotChangeEventSource parallel execute createSchemaEventsForTables DBZ-6472

    • Refactor errors.max.retries to common connector framework DBZ-6573

    • Explain failure on existing publication update when switching to filtered from all_tables DBZ-6577

    • Debezium should honor read preference from connection string DBZ-6578

    • Document support for database restricted privileges for 2.4 DBZ-6592

    • Use source field in topic in table.format.name DBZ-6595

    • Support authentication with TC MongoDB deployments DBZ-6596

    • Support for getting primary key from header DBZ-6602

    • Support for custom tags in the connector metrics DBZ-6603

    • Update docs for new shard field DBZ-6627

    • Improve JDBC connector documentation DBZ-6632

    • Add configurable timeout to initialization procedure DBZ-6653

    • Introduce internal config option to control how close to CURRENT_SCN Oracle may mine DBZ-6660

    • Add support for XML_TYPE column type to Debezium connector for Oracle DBZ-3605

    Fixes

    • Mysql connector fails to parse statement FLUSH FIREWALL_RULES DBZ-3925

    • Snapshot result not saved if LAST record is filtered out DBZ-5464

    • CloudEventsConverter throws static error on Kafka Connect 3.5+ DBZ-6517

    • Dependency io.debezium:debezium-testing-testcontainers affects logback in tests DBZ-6525

    • Batches with DELETE statement first will skip everything else DBZ-6576

    • Oracle unsupported DDL statement - drop multiple partitions DBZ-6585

    • Only Struct objects supported for [Header field insertion], found: null DBZ-6588

    • Support PostgreSQL coercion for UUID, JSON, and JSONB data types DBZ-6589

    • MySQL parser cannot parse CAST AS dec DBZ-6590

    • Excessive Log Message 'Marking Processed Record for Topic' DBZ-6597

    • Fixed DataCollections for table scan completion notification DBZ-6605

    • Oracle connector is not recoverable if ORA-01327 is wrapped by another JDBC or Oracle exception DBZ-6610

    • Fatal error when parsing Mysql (Percona 5.7.39-42) procedure DBZ-6613

    • Build of Postgres connector fails when building against Kafka 2.X DBZ-6614

    • Upgrade postgresql driver to v42.6.0 DBZ-6619

    • MySQL ALTER USER with RETAIN CURRENT PASSWORD fails with parsing exception DBZ-6622

    • Inaccurate documentation regarding additional-condition DBZ-6628

    • Oracle connection SQLRecoverableExceptions are not retried by default DBZ-6633

    • Cannot delete non-null interval value DBZ-6648

    • ConcurrentModificationException thrown in Debezium 2.3 DBZ-6650

    • Dbz crashes on parsing Mysql Procedure Code (Statement Labels) DBZ-6651

    • CloudEvents converter is broken for JSON message deserialization DBZ-6654

    • Vitess: Connector fails if table name is a mysql reserved word DBZ-6656

    • Junit conflicts cause by test-containers module using transitive Junit5 from quarkus DBZ-6659

    Other changes

    • Add the API endpoint to expose running connector metrics DBZ-5359

    • Display critical connector metrics DBZ-5360

    • Define and document schema history topic messages schema DBZ-5518

    • Align query.fetch.size across connectors DBZ-5676

    • Upgrade to Apache Kafka 3.5.0 DBZ-6047

    • Remove downstream related code from UI Frontend code DBZ-6394

    • Make Signal actions extensible DBZ-6417

    • Cleanup duplicate jobs from Jenkins DBZ-6535

    • Implement sharded mongo ocp deployment and integration tests DBZ-6538

    • Refactor retry handling in Redis schema history DBZ-6594

    • Upgrade Quarkus to 3.2.0.Final DBZ-6626

    • Upgrade kcctl to 1.0.0.Beta3 DBZ-6642

    • Upgrade gRPC to 1.56.1 DBZ-6649

    • Disable Kafka 2.x CRON trigger DBZ-6667

    \ No newline at end of file diff --git a/releases/2.5/index.html b/releases/2.5/index.html index e4b78f35ff..2bea79e6b1 100644 --- a/releases/2.5/index.html +++ b/releases/2.5/index.html @@ -1 +1 @@ - Debezium Release Series 2.5

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 8.0.x, 8.2
    Driver: 8.0.33
    MariaDB Database: 11.1.2
    Driver: 3.2.0
    MongoDB Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 10, 11, 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.0
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.10

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.5.1.Final

    2024-01-30
    Post-images support for MongoDB; Oracle and MySQL grammar fixes; PostgreSQL heartbeats in WAL searching phase; Correct default character values for SQL Server; Debezium Server sinks correctly ignore tombstone events if they are not supported; HTTP sink retries on GOAWAY

    2.5.0.Beta1

    2023-12-04
    GTID support for MariaDB; Native RabbitMQ Streams sink; Partitioning for EventHubs sink; Additional notifications for initial snapshot; Service account for Operator CRD; MongoDB performance and metrics improvements; Streaming from PostgreSQL 16 stand-bys; Support for high-precision source timestamps

    2.5.0.Alpha2

    2023-11-10
    \ No newline at end of file diff --git a/releases/2.5/release-notes.html b/releases/2.5/release-notes.html index b9db2bf793..b4e311fbdd 100644 --- a/releases/2.5/release-notes.html +++ b/releases/2.5/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 2.5

    Release Notes for Debezium 2.5

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.5.4.Final (March 27th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Full incremental snapshot on SQL Server Table skipping block of 36 records DBZ-7359

    • ReselectColumnsPostProcessor filter not use exclude predicate DBZ-7437

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    Other changes

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Exclude jcl-over-slf4j dependency DBZ-7665

    Release 2.5.3.Final (March 19th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The SQL Server connector did not capture all schemas upon first connector start. This was a bug that could prevent adding new tables to the include list later. The connector now correctly honors the store.only.captured.tables configuration option (DBZ-7593).
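
    The note above abbreviates the option name; a hedged configuration fragment follows, assuming the fully qualified property documented for the 2.x series, schema.history.internal.store.only.captured.tables.ddl, with placeholder values elsewhere.

        # Hypothetical fragment of a SQL Server connector configuration; the fully
        # qualified property name is an assumption based on the 2.x documentation.
        sqlserver_config = {
            "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
            "table.include.list": "dbo.customers",  # placeholder
            # Store DDL in the schema history topic only for captured tables:
            "schema.history.internal.store.only.captured.tables.ddl": "true",
        }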

    New features

    • Use TRACE level log for Debezium Server in build time DBZ-7369

    • Append LSN to txID DBZ-7454

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Improved logging in case of PostgreSQL failure DBZ-7581

    • Performance Issue in Cassandra Connector DBZ-7622

    Fixes

    • NullPointerException in MongoDB connector DBZ-6434

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    Other changes

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • Fix MySQL image fetch for tests DBZ-7651

    Release 2.5.2.Final (February 27th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support connector scoped trustore/keystore for MongoDB DBZ-7379

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Consolidate version management DBZ-7455

    Fixes

    • PostgreSQL connector doesn’t restart properly if database is not reachable DBZ-6236

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Stopwatch throw NPE when toString is called without having statistics DBZ-7436

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • LogMiner batch size does not increase automatically DBZ-7445

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    Other changes

    • Correctly handle METADATA records DBZ-7176

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    • Document toggling MariaDB mode DBZ-7487

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    Release 2.5.1.Final (January 30th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The re-select columns post-processor used the key defined by message.key.columns for query building. This is not correct for most tables with a primary key. The default behaviour has changed: the table's primary key is now used by default, and a new configuration option was introduced to let the user choose whether the primary key or the generated key should be used (DBZ-7358).
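
    A hedged sketch of registering the post-processor is shown below; the fully qualified class name and the "reselector" prefix are assumptions based on the Debezium documentation and may differ between versions.

        # Hypothetical post-processor registration fragment.
        reselect_config = {
            "post.processors": "reselector",
            "reselector.type": "io.debezium.processors.reselect.ReselectColumnsPostProcessor",
            # The new key-source option mentioned above is set with a "reselector."-prefixed
            # property; see the connector documentation for its exact name and values.
        }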

    New features

    • MongoDb connector doesn’t use post-images DBZ-7299

    • Replace additional rolebinding definition in kubernetes.yml with @RBACRule DBZ-7381

    Fixes

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (subquery with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • Debezium fails after table split operation DBZ-7360

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    Other changes

    • Add service loader manifests for all Connect plugins DBZ-7298

    • Update Groovy version to 4.x DBZ-7340

    • Update QOSDK to the latest version DBZ-7361

    Release 2.5.0.Final (December 21st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The MongoDB default connection mode has changed from replica_set to sharded as a preparation step for the complete removal of the replica_set mode. The change would lead to invalidation of existing offsets and a silent re-execution of the initial snapshot; to prevent this, a check was added that fails the connector upon start if this would happen. The user can either set the replica_set connection mode explicitly or remove the existing offsets (DBZ-7272).
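
    A minimal, hedged fragment showing how an existing deployment could pin the previous behaviour; the property name mongodb.connection.mode and the value replica_set follow the wording of the note above, everything else is a placeholder.

        # Hypothetical fragment for an existing MongoDB connector that should keep
        # its offsets: pin the previous connection mode explicitly.
        mongodb_config_fragment = {
            "connector.class": "io.debezium.connector.mongodb.MongoDbConnector",
            "mongodb.connection.mode": "replica_set",  # avoids offset invalidation per DBZ-7272
        }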

    New features

    • Support persistent history for snapshot requests for the kafka signal topic. DBZ-7164

    • Change metrics endpoint of Connect REST Extensions to use the MBeanServer directly instead of HTTP calls to the Jolokia endpoint DBZ-7177

    • Metrics endpoint must handle connectors with multiple tasks (SQL Server) DBZ-7178

    • Add configuration option to CloudEventsConverter to customize schema type name DBZ-7235

    Fixes

    • DDL GRANT statement couldn’t be parsed DBZ-7213

    • Debezium Oracle plugin 2.5.0 Beta does not support Oracle 11g DBZ-7257

    • Error during snapshot with multiple snapshot threads will not properly abort snapshotting DBZ-7264

    • MySQL RDS UPDATE queries not ignored DBZ-7271

    • Leaking JDBC connections DBZ-7275

    • IncrementalSnapshotCaseSensitiveIT#insertDeleteWatermarkingStrategy fails DBZ-7276

    • Debezium MySQL could not parse certain grant privileges. DBZ-7277

    • Add PL/SQL Parser for Create Table Memoptimize DBZ-7279

    • Support for Creating EDITIONABLE or NONEDITIONABLE Packages DBZ-7283

    • Add PL/SQL Parser for Alter Table Memoptimize DBZ-7268

    Other changes

    • Move metrics endpoint from UI backend to the Debezium Connect REST extension/s DBZ-6764

    • website-builder image fails with newer bundler DBZ-7269

    • Vitess connector build fails due to invalid GPG key DBZ-7280

    Release 2.5.0.CR1 (December 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The schema name prefix and letter casing for CloudEvent headers were not consistent with the payload name. The schema names were aligned so that both headers and payload share the same namespace and follow the same rules for letter casing (DBZ-7216).

    The MySQL BIT datatype did not have an implicit length when none was set. This was incorrect, as the default length when none is provided is 1 (DBZ-7230).

    New features

    • Explore BLOB support via re-selection DBZ-4321

    • Use the StreamNameMapper in debezium-server-kafka DBZ-6071

    • Provide INSERT/DELETE semantics for incremental snapshot watermarking DBZ-6834

    • AWS SQS as sink type in Debezium standalone server DBZ-7214

    • Oracle LOB to be properly ignored if lob.enabled=false DBZ-7237

    • Upgrade Kafka to 3.6.1 and ZooKeeper to 3.8.3 DBZ-7238

    Fixes

    • Oracle abandoned transaction implementation bug causes OoM DBZ-7236

    • Add Grammar Oracle Truncate Cluster DBZ-7242

    • Length value is not removed when changing a column’s type DBZ-7251

    • MongoDB table/collection snapshot notification contains incorrect offsets DBZ-7252

    • Broken support for multi-namespace watching DBZ-7254

    Other changes

    • Add tracing logs to track execution time for Debezium JDBC connector DBZ-7217

    • Validate & clarify multiple archive log destination requirements for Oracle DBZ-7218

    • Upgrade logback to 1.2.13 DBZ-7232

    Release 2.5.0.Beta1 (December 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MongoDB no longer allows taking a collection snapshot on a specific shard in a sharded deployment. This was never an intended or supported feature and was removed as a step towards the complete removal of the replica set streaming mode (DBZ-7139).

    The ComputePartition SMT was deprecated and replaced with the significantly improved PartitionRouting SMT. The original SMT is now completely removed (DBZ-7141).

    The JDBC sink connector was storing default values in database columns when a null value was streamed and a default value existed. This was incorrect behaviour and is now fixed (DBZ-7191).

    New features

    • Support for mariadb GTID DBZ-1482

    • Include only certain columns in JDBC sink connector DBZ-6636

    • Support native RabbitMQ Streams DBZ-6703

    • Add support for partitioning with Azure EventHubs DBZ-6723

    • Enhance Notification information and more notifications for Initial Snapshots DBZ-6878

    • Add handling for CDB and non-CDB / PDB in Oracle REST Extension tests DBZ-7091

    • Check schema length when create value to find missed DDL by SQL_BIN_LOG=OFF DBZ-7093

    • Add service account parameter to DebeziumServer CRD DBZ-7111

    • Inactivity pause in MongoDB connector should be configurable DBZ-7146

    • Oracle Infinispan event processor speed-up using in memory cache DBZ-7153

    • Add last event process time, number of events, number of heartbeat events metrics to MongoDb connector DBZ-7162

    • LogMiner ISPN event buffer recent transaction optimization DBZ-7169

    • Support logical decoding from Postgres 16 stand-bys DBZ-7181

    • Support MySQL 8 high resolution replication timestamps from GTID events DBZ-7183

    • Use buffer queue when reading MongoDB change stream events DBZ-7184

    • Cleanup event processing loop in streaming event source of MongoDB connector DBZ-7186

    • Oracle Infinispan - implement support for abandoned transactions DBZ-7192

    • Add ability to avoid throwing an exception for missing additional fields DBZ-7197

    • XStream attach should be retriable DBZ-7207

    Fixes

    • Test Avro adjustment for MongoDb connector and ExtractNewDocumentState SMT DBZ-6809

    • The DefaultDeleteHandlingStrategy couldn’t add the rewrite "__deleted" field to a non-struct value DBZ-7066

    • Debezium server has no default for offset.flush.interval.ms DBZ-7099

    • Failed to authenticate to the MySQL database after snapshot DBZ-7132

    • Failure reading CURRENT_TIMESTAMP on Informix 12.10 DBZ-7137

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    • outbox.EventRouter SMT throws NullPointerException when there is a whitespace in fields.additional.placement value DBZ-7142

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • JsonSerialisation is unable to process changes from sharded collections with composite sharding key DBZ-7157

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Fix DebeziumMySqlConnectorResource not using the new MySQL adapter structure to support different MySQL flavors DBZ-7179

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • MongoDB streaming pauses for Blocking Snapshot only when there is no event DBZ-7206

    • NPE on AbstractInfinispanLogMinerEventProcessor.logCacheStats DBZ-7211

    Other changes

    • Generate sundrio fluent builders for operator model DBZ-6550

    • Convert operator source into multi module project DBZ-6551

    • Implement "validate filters" endpoint in connector-specific Connect REST extensions DBZ-6762

    • Implement IT tests against Cloud Spanner emulator in main repo. DBZ-6906

    • Implement strategy pattern for MariaDB and MySQL differences DBZ-7083

    • Run MySQL CI builds in parallel DBZ-7135

    • Add matrix strategy to workflows DBZ-7154

    • Add Unit Tests for ServiceAccountDependent Class in Debezium Operator Repository DBZ-7155

    • Fail fast during deserialization if a value is not a CloudEvent DBZ-7159

    • Correctly calculate Max LSN DBZ-7175

    • Upgrade to Infinispan 14.0.20 DBZ-7187

    • Upgrade Outbox Extension to Quarkus 3.5.3 DBZ-7188

    • Enable ability to stream changes against Oracle 23c for LogMiner DBZ-7194

    • Add modify range_partitions to modify_table_partition rule in parsing PL/SQL DBZ-7196

    • Handle Drop Tablespace in PL/SQL DBZ-7208

    • Upgrade logback to 1.2.12 DBZ-7209

    Release 2.5.0.Alpha2 (November 10th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MySQL 5.7 has reached end of life. Debezium no longer officially supports or tests with it; further support is on a best-effort basis only (DBZ-6874).

    Configuration option metadata.location was renamed to metadata.source for CloudEvents converter (DBZ-7060).

    The MongoDB default connection mode is switched from replica_set to sharded. This is the first step toward removing the replica_set mode completely. Please note that for existing deployments the offsets will be invalidated and a snapshot triggered unless the configuration option is set explicitly (DBZ-7108).

    Using the deprecated embedded engine API is no longer possible. Only the Debezium Engine API is available (DBZ-7110).

    New features

    • JDBC Sink Connector - Support batch operations DBZ-6317

    • Utilize $changeStreamSplitLargeEvent to handle large change events with post and pre images DBZ-6726

    • Add support for MySQL 8.2 DBZ-6873

    • Kinesis Sink Reliability DBZ-7032

    • Upgrade MSSQL JDBC driver to support sensitivity classification DBZ-7109

    • Add maximum retry limit to Redis Schema History DBZ-7120

    • Emit a notification when completed reading from a capture instance DBZ-7043

    Fixes

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • JDBC sink connector not working with CloudEvent DBZ-7065

    • JDBC connection leak when error occurs during processing DBZ-7069

    • Some server tests fail due to @com.google.inject.Inject annotation DBZ-7077

    • HttpIT fails with "Unrecognized field subEvents" DBZ-7092

    • MySQL parser does not conform to arithmetical operation priorities DBZ-7095

    • When RelationalBaseSourceConnector#validateConnection is called with invalid config [inside Connector#validate()] can lead to exceptions DBZ-7105

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    Other changes

    • Add (integration) tests for Oracle connector-specific Debezium Connect REST extension DBZ-6763

    • Intermittent failure of MongoDbReplicaSetAuthTest DBZ-6875

    • Mongodb tests in RHEL system testsuite are failing with DBZ 2.3.4 DBZ-6996

    • Use DebeziumEngine instead of EmbeddedEngine in the testsuite DBZ-7007

    • Update transformation property "delete.tombstone.handling.mode" to debezium doc DBZ-7062

    • Add MariaDB driver for testing and distribution DBZ-7085

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    • VitessConnectorIT.shouldTaskFailIfColumnNameInvalid fails DBZ-7104

    Release 2.5.0.Alpha1 (October 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The ExtractNewRecordState and ExtractNewDocumentState SMT configuration was reworked. Two distinct options that separately controlled the handling of delete and tombstone records were merged into a single one (DBZ-6907).

    Support for MongoDB 4.4 is deprecated (DBZ-6881).

    New features

    • Provide first class support for MariaDB DBZ-2913

    • Support for IBM Informix DBZ-4999

    • Add support for honouring MongoDB read preference in change stream after promotion DBZ-5953

    • Enable Spanner Connector against Cloud Spanner Emulator DBZ-6845

    • Refactor Oracle streaming metrics beans DBZ-6899

    • Provide capability to set image pull secrets in DS k8s CRD DBZ-6962

    • Upgrade to Vitess 17 for integration tests DBZ-6981

    • Add the ability to sanitize field name when inferencing json schema DBZ-6983

    • Allow OLM Bundle scripts to download from maven central by default DBZ-6995

    • Enhance README.md with Instructions for Creating a Kubernetes Namespace DBZ-7004

    • Support OKD/Openshift catalog in OH release script DBZ-7010

    • Add displayName and description metadata to DebeziumServer CRD in OLM Bundle DBZ-7011

    • Upgrade Kafka to 3.6.0 DBZ-7033

    • DebeziumConnector always attempts to contact Quay.io to determine latest stable version DBZ-7044

    • Support snapshot with automatic retry DBZ-7050

    • Provide resources to set pod requests and limits in DS k8s CRD DBZ-7052

    • Provide svc to better collect dbz-server metrics in DS k8s DBZ-7053

    • Improve logging at DEBUG level for Commit events DBZ-7067

    • Replace schema tracking restriction for SYS/SYSTEM users with configurable option DBZ-7071

    Fixes

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Single quote replication includes escaped quotes for N(CHAR/VARCHAR) columns DBZ-6975

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • Debezium doesn’t compile with JDK 21 DBZ-6992

    • OLM bundle version for GA releases is invalid DBZ-6994

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • DDL statement couldn’t be parsed DBZ-7030

    • Blocking ad-hoc snapshot is not really blocking for MySQL DBZ-7035

    • Fake ROTATE event on connection restart cleans metadata DBZ-7037

    Other changes

    • Adding Debezium Server example using MySQL and GCP PubSub DBZ-4471

    • Test Debezium against MSSQL 2016 DBZ-6693

    • Test Debezium against DB2 1.5.8.0 DBZ-6694

    • Add MSSQL 2022 to test matrix DBZ-6695

    • Edit test matrix after team evaluation DBZ-6696

    • Edit test automation to run both DB2 1.5.8.0 and 1.5.0.0a DBZ-6697

    • Refactor ElapsedTimeStrategy DBZ-6778

    • Provide configuration option to exclude extension attributes from a CloudEvent DBZ-6982

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • Remove deprecated embedded engine code DBZ-7013

    • Enable replication slot advance check DBZ-7015

    • Add configuration option to CloudEventsConverter to retrieve id and type from headers DBZ-7016

    • Use optional schema for Timezone Converter tests DBZ-7020

    • Debezium Operator blogpost DBZ-7025

    • Apply 2.3.4 updates to main branch DBZ-7039

    • Update documentation with Postgres’s pgoutput limitation DBZ-7041

    • Use oracle container registry for MySQL images DBZ-7042

    • Updates to fix build of downstream doc DBZ-7046

    • Update operator dependencies and add qosdk platform bom DBZ-7048

    • Upgrade maven-surefire-plugin to 3.1.2 DBZ-7055

    • Consolidate resource labels and annotations DBZ-7064

    • Disable time sync in Testing farm test runs DBZ-7074

    \ No newline at end of file + Release Notes for Debezium 2.5

    Release Notes for Debezium 2.5

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.5.4.Final (March 27th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    • Full incremental snapshot on SQL Server Table skipping block of 36 records DBZ-7359

    • ReselectColumnsPostProcessor filter not use exclude predicate DBZ-7437

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    Other changes

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Exclude jcl-over-slf4j dependency DBZ-7665

    Release 2.5.3.Final (March 19th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The SQL Server connector did not capture all schemas upon the first connector start. This was a bug that could prevent adding new tables to the include list later. The connector now correctly honors the store.only.captured.tables configuration option (DBZ-7593).
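
    A minimal sketch of the schema history setting referenced above, assuming the fully qualified schema.history.internal.store.only.captured.tables.ddl property used by the relational connectors; the connector class is shown only for context:

    import java.util.Properties;

    public class SqlServerSchemaHistorySketch {
        // Record DDL only for the captured tables; per DBZ-7593 this is now honored
        // on the very first connector start as well.
        static void limitSchemaHistory(Properties config) {
            config.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
            config.setProperty("schema.history.internal.store.only.captured.tables.ddl", "true");
        }
    }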

    New features

    • Use TRACE level log for Debezium Server in build time DBZ-7369

    • Append LSN to txID DBZ-7454

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Improved logging in case of PostgreSQL failure DBZ-7581

    • Performance Issue in Cassandra Connector DBZ-7622

    Fixes

    • NullPointerException in MongoDB connector DBZ-6434

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    Other changes

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • Fix MySQL image fetch for tests DBZ-7651

    Release 2.5.2.Final (February 27th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support connector scoped truststore/keystore for MongoDB DBZ-7379

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Consolidate version management DBZ-7455

    Fixes

    • PostgreSQL connector doesn’t restart properly if database is not reachable DBZ-6236

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Stopwatch throw NPE when toString is called without having statistics DBZ-7436

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • LogMiner batch size does not increase automatically DBZ-7445

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    Other changes

    • Correctly handle METADATA records DBZ-7176

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    • Document toggling MariaDB mode DBZ-7487

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    Release 2.5.1.Final (January 30th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The re-select columns post-processor used the key defined by message.key.columns for query building. This is not correct for most tables with a primary key. The default behaviour has changed and the table primary key is now used. A new configuration option was introduced to allow the user to choose whether the primary key or the generated key should be used (DBZ-7358).
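
    A hedged sketch of how the post processor might be wired; the class and option names below are assumptions based on the post-processor documentation, and the key-source option in the last line is a purely illustrative placeholder because the release note does not spell out its name:

    import java.util.Properties;

    public class ReselectColumnsSketch {
        static void addReselectPostProcessor(Properties config) {
            config.setProperty("post.processors", "reselector");
            config.setProperty("reselector.type",
                    "io.debezium.processors.reselect.ReselectColumnsPostProcessor");
            // Placeholder table/column to re-select.
            config.setProperty("reselector.reselect.columns.include.list", "inventory.products.description");
            // Hypothetical option name, shown for illustration only: opt back into the
            // message.key.columns-derived key instead of the table primary key.
            config.setProperty("reselector.reselect.use.event.key", "true");
        }
    }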

    New features

    • MongoDb connector doesn’t use post-images DBZ-7299

    • Replace additional rolebinding definition in kubernetes.yml with @RBACRule DBZ-7381

    Fixes

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (subquery with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • Debezium fails after table split operation DBZ-7360

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    Other changes

    • Add service loader manifests for all Connect plugins DBZ-7298

    • Update Groovy version to 4.x DBZ-7340

    • Update QOSDK to the latest version DBZ-7361

    Release 2.5.0.Final (December 21st 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The MongoDB default connection mode is changed from replica_set to sharded as a preparation step for the complete removal of replica_set. The change would lead to invalidation of existing offsets and a silent re-execution of the initial snapshot, so a check was added that fails the connector upon start if this would happen. The user can either set the replica_set connection mode explicitly or remove the existing offsets (DBZ-7272).
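
    A minimal configuration sketch for keeping the previous behaviour, assuming the MongoDB connector's mongodb.connection.mode property; the connection string and topic prefix are placeholders:

    import java.util.Properties;

    public class MongoDbConnectionModeSketch {
        // Pin the pre-2.5 connection mode explicitly so existing offsets stay valid.
        static Properties connectorConfig() {
            Properties config = new Properties();
            config.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
            config.setProperty("mongodb.connection.string", "mongodb://mongos.example:27017/"); // placeholder
            config.setProperty("topic.prefix", "dbserver1"); // placeholder
            // "replica_set" keeps the old default; omit it or set "sharded" to adopt the new default.
            config.setProperty("mongodb.connection.mode", "replica_set");
            return config;
        }
    }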

    New features

    • Support persistent history for snapshot requests for the kafka signal topic. DBZ-7164

    • Change metrics endpoint of Connect REST Extensions to use the MBeanServer directly instead of HTTP calls to the Jolokia endpoint DBZ-7177

    • Metrics endpoint must handle connectors with multiple tasks (SQL Server) DBZ-7178

    • Add configuration option to CloudEventsConverter to customize schema type name DBZ-7235

    Fixes

    • DDL GRANT statement couldn’t be parsed DBZ-7213

    • Debezium Oracle plugin 2.5.0 Beta does not support Oracle 11g DBZ-7257

    • Error during snapshot with multiple snapshot threads will not properly abort snapshotting DBZ-7264

    • MySQL RDS UPDATE queries not ignored DBZ-7271

    • Leaking JDBC connections DBZ-7275

    • IncrementalSnapshotCaseSensitiveIT#insertDeleteWatermarkingStrategy fails DBZ-7276

    • Debezium MySQL could not parse certain grant privileges. DBZ-7277

    • Add PL/SQL Parser for Create Table Memoptimize DBZ-7279

    • Support for Creating EDITIONABLE or NONEDITIONABLE Packages DBZ-7283

    • Add PL/SQL Parser for Alter Table Memoptimize DBZ-7268

    Other changes

    • Move metrics endpoint from UI backend to the Debezium Connect REST extension/s DBZ-6764

    • website-builder image fails with newer bundler DBZ-7269

    • Vitess connector build fails due to invalid GPG key DBZ-7280

    Release 2.5.0.CR1 (December 14th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The schema name prefix and letter casing for CloudEvents headers were not consistent with the payload name. The schema names were aligned so that both headers and payload share the same namespace and follow the same rules for letter casing (DBZ-7216).

    The MySQL BIT datatype did not have an implicit length when none was set. This was incorrect, as the default length when none is provided is 1, so a column declared as BIT is now treated as BIT(1) (DBZ-7230).

    New features

    • Explore BLOB support via re-selection DBZ-4321

    • Use the StreamNameMapper in debezium-server-kafka DBZ-6071

    • Provide INSERT/DELETE semantics for incremental snapshot watermarking DBZ-6834

    • AWS SQS as sink type in Debezium standalone server DBZ-7214

    • Oracle LOB to be properly ignored if lob.enabled=false DBZ-7237

    • Upgrade Kafka to 3.6.1 and ZooKeeper to 3.8.3 DBZ-7238

    Fixes

    • Oracle abandoned transaction implementation bug causes OoM DBZ-7236

    • Add Grammar Oracle Truncate Cluster DBZ-7242

    • Length value is not removed when changing a column’s type DBZ-7251

    • MongoDB table/collection snapshot notifications contain incorrect offsets DBZ-7252

    • Broken support for multi-namespace watching DBZ-7254

    Other changes

    • Add tracing logs to track execution time for Debezium JDBC connector DBZ-7217

    • Validate & clarify multiple archive log destination requirements for Oracle DBZ-7218

    • Upgrade logback to 1.2.13 DBZ-7232

    Release 2.5.0.Beta1 (December 4th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MongoDB no longer allows taking a collection snapshot on a specific shard in a sharded deployment. This was never an intended or supported feature and was removed as a step towards the complete removal of the replica set streaming mode (DBZ-7139).

    The ComputePartition SMT was deprecated and replaced with the significantly improved PartitionRouting SMT. The original SMT is now completely removed (DBZ-7141).
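
    A hedged sketch of the replacement SMT, assuming the PartitionRouting class name and its partition.payload.fields and partition.topic.num options as documented; the field list and partition count are placeholders:

    import java.util.Properties;

    public class PartitionRoutingSketch {
        static void addPartitionRouting(Properties config) {
            config.setProperty("transforms", "partitionRouting");
            config.setProperty("transforms.partitionRouting.type",
                    "io.debezium.transforms.partitions.PartitionRouting");
            // Event payload fields whose values are used to pick the target partition (placeholder).
            config.setProperty("transforms.partitionRouting.partition.payload.fields", "change.name");
            // Number of partitions of the destination topic (placeholder).
            config.setProperty("transforms.partitionRouting.partition.topic.num", "3");
        }
    }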

    The JDBC sink connector was storing column default values in the database when a null value was streamed and such a default existed. This was incorrect behaviour and is now fixed (DBZ-7191).

    New features

    • Support for mariadb GTID DBZ-1482

    • Include only certain columns in JDBC sink connector DBZ-6636

    • Support native RabbitMQ Streams DBZ-6703

    • Add support for partitioning with Azure EventHubs DBZ-6723

    • Enhance Notification information and more notifications for Initial Snapshots DBZ-6878

    • Add handling for CDB and non-CDB / PDB in Oracle REST Extension tests DBZ-7091

    • Check schema length when create value to find missed DDL by SQL_BIN_LOG=OFF DBZ-7093

    • Add service account parameter to DebeziumServer CRD DBZ-7111

    • Inactivity pause in MongoDB connector should be configurable DBZ-7146

    • Oracle Infinispan event processor speed-up using in memory cache DBZ-7153

    • Add last event process time, number of events, number of heartbeat events metrics to MongoDb connector DBZ-7162

    • LogMiner ISPN event buffer recent transaction optimization DBZ-7169

    • Support logical decoding from Postgres 16 stand-bys DBZ-7181

    • Support MySQL 8 high resolution replication timestamps from GTID events DBZ-7183

    • Use buffer queue when reading MongoDB change stream events DBZ-7184

    • Cleanup event processing loop in streaming event source of MongoDB connector DBZ-7186

    • Oracle Infinispan - implement support for abandoned transactions DBZ-7192

    • Add ability to avoid throwing an exception for missing additional fields DBZ-7197

    • XStream attach should be retriable DBZ-7207

    Fixes

    • Test Avro adjustment for MongoDb connector and ExtractNewDocumentState SMT DBZ-6809

    • The DefaultDeleteHandlingStrategy couldn’t add the rewrite "__deleted" field to a non-struct value DBZ-7066

    • Debezium server has no default for offset.flush.interval.ms DBZ-7099

    • Failed to authenticate to the MySQL database after snapshot DBZ-7132

    • Failure reading CURRENT_TIMESTAMP on Informix 12.10 DBZ-7137

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UNION) DBZ-7140

    • outbox.EventRouter SMT throws NullPointerException when there is a whitespace in fields.additional.placement value DBZ-7142

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (specific UPDATE) DBZ-7152

    • JsonSerialisation is unable to process changes from sharded collections with composite sharding key DBZ-7157

    • Log sequence check should treat each redo thread independently DBZ-7158

    • Fix DebeziumMySqlConnectorResource not using the new MySQL adapter structure to support different MySQL flavors DBZ-7179

    • Parsing MySQL indexes for JSON field fails, when casting is used with types double and float DBZ-7189

    • Unchanged toasted array columns are substituted with unavailable.value.placeholder, even when REPLICA IDENTITY FULL is configured. DBZ-7193

    • MongoDB streaming pauses for Blocking Snapshot only when there is no event DBZ-7206

    • NPE on AbstractInfinispanLogMinerEventProcessor.logCacheStats DBZ-7211

    Other changes

    • Generate sundrio fluent builders for operator model DBZ-6550

    • Convert operator source into multi module project DBZ-6551

    • Implement "validate filters" endpoint in connector-specific Connect REST extensions DBZ-6762

    • Implement IT tests against Cloud Spanner emulator in main repo. DBZ-6906

    • Implement strategy pattern for MariaDB and MySQL differences DBZ-7083

    • Run MySQL CI builds in parallel DBZ-7135

    • Add matrix strategy to workflows DBZ-7154

    • Add Unit Tests for ServiceAccountDependent Class in Debezium Operator Repository DBZ-7155

    • Fail fast during deserialization if a value is not a CloudEvent DBZ-7159

    • Correctly calculate Max LSN DBZ-7175

    • Upgrade to Infinispan 14.0.20 DBZ-7187

    • Upgrade Outbox Extension to Quarkus 3.5.3 DBZ-7188

    • Enable ability to stream changes against Oracle 23c for LogMiner DBZ-7194

    • Add modify range_partitions to modify_table_partition rule in parsing PL/SQL DBZ-7196

    • Handle Drop Tablespace in PL/SQL DBZ-7208

    • Upgrade logback to 1.2.12 DBZ-7209

    Release 2.5.0.Alpha2 (November 10th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MySQL 5.7 is end of life. Debezium no longer officially supports it or tests against it. Further support is on a best-effort basis only (DBZ-6874).

    The configuration option metadata.location was renamed to metadata.source for the CloudEvents converter (DBZ-7060).
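
    A brief sketch of where the renamed option is applied, assuming the CloudEvents converter is configured as the Kafka Connect value converter; the value shown is illustrative:

    import java.util.Properties;

    public class CloudEventsConverterSketch {
        static void configureValueConverter(Properties config) {
            config.setProperty("value.converter", "io.debezium.converters.CloudEventsConverter");
            // Formerly value.converter.metadata.location (renamed by DBZ-7060); the value is
            // illustrative, see the CloudEvents converter documentation for the accepted options.
            config.setProperty("value.converter.metadata.source", "value");
        }
    }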

    MongoDB default connection mode is switched from replica_set to sharded. This is the first step towards removing the replica_set mode completely. Please note that the offsets will be invalidated and a snapshot will be triggered for existing deployments unless the connection mode is explicitly set in the configuration (DBZ-7108).

    Using the deprecated embedded engine API is no longer possible. Only the Debezium Engine API is available (DBZ-7110).
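
    For reference, a minimal sketch of the Debezium Engine API that remains after the removal; the connector class, offset store, and file path below are illustrative placeholders:

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class DebeziumEngineSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("name", "example-engine");
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector"); // placeholder
            props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
            props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat"); // placeholder
            // Connector-specific connection properties would be added here.

            // DebeziumEngine is the supported embedded API after DBZ-7110.
            DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(record -> System.out.println(record.value()))
                    .build();

            ExecutorService executor = Executors.newSingleThreadExecutor();
            executor.execute(engine);
            // On shutdown: engine.close(); executor.shutdown();
        }
    }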

    New features

    • JDBC Sink Connector - Support batch operations DBZ-6317

    • Utilize $changeStreamSplitLargeEvent to handle large change events with post and pre images DBZ-6726

    • Add support for MySQL 8.2 DBZ-6873

    • Kinesis Sink Reliability DBZ-7032

    • Upgrade MSSQL JDBC driver to support sensitivity classification DBZ-7109

    • Add maximum retry limit to Redis Schema History DBZ-7120

    • Emit a notification when completed reading from a capture instance DBZ-7043

    Fixes

    • Oracle RAC throws ORA-00310: archive log sequence required DBZ-5350

    • oracle missing CDC data DBZ-5656

    • Missing oracle cdc records DBZ-5750

    • Connector frequently misses commit operations DBZ-6942

    • Missing events from Oracle 19c DBZ-6963

    • Debezium Embedded Infinispan Performs Slowly DBZ-7047

    • Field exclusion does not work with events of removed fields DBZ-7058

    • JDBC sink connector not working with CloudEvent DBZ-7065

    • JDBC connection leak when error occurs during processing DBZ-7069

    • Some server tests fail due to @com.google.inject.Inject annotation DBZ-7077

    • HttpIT fails with "Unrecognized field subEvents" DBZ-7092

    • MySQL parser does not conform to arithmetical operation priorities DBZ-7095

    • Calling RelationalBaseSourceConnector#validateConnection with an invalid config [inside Connector#validate()] can lead to exceptions DBZ-7105

    • Debezium crashes on parsing MySQL DDL statement (specific INSERT) DBZ-7119

    Other changes

    • Add (integration) tests for Oracle connector-specific Debezium Connect REST extension DBZ-6763

    • Intermittent failure of MongoDbReplicaSetAuthTest DBZ-6875

    • Mongodb tests in RHEL system testsuite are failing with DBZ 2.3.4 DBZ-6996

    • Use DebeziumEngine instead of EmbeddedEngine in the testsuite DBZ-7007

    • Update transformation property "delete.tombstone.handling.mode" to debezium doc DBZ-7062

    • Add MariaDB driver for testing and distribution DBZ-7085

    • Allow DS JMX to use username-password authentication on k8 DBZ-7087

    • VitessConnectorIT.shouldTaskFailIfColumnNameInvalid fails DBZ-7104

    Release 2.5.0.Alpha1 (October 26th 2023)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.0 and has been tested with version 3.6.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.5.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.5.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.5.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The ExtractNewRecordState and ExtractNewDocumentState SMT configuration was reworked. Two distinct options that separately controlled the handling of delete and tombstone records were merged into a single one (DBZ-6907).
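
    A short sketch of the reworked configuration, assuming the merged option is the delete.tombstone.handling.mode property referenced elsewhere in these notes; the value shown is only one of the accepted modes:

    import java.util.Properties;

    public class UnwrapSmtSketch {
        // Replaces the former delete.handling.mode / drop.tombstones pair with a single option.
        static void addUnwrapTransform(Properties config) {
            config.setProperty("transforms", "unwrap");
            config.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
            // "rewrite" keeps delete events and flags them with a __deleted field; see the
            // SMT documentation for the full list of accepted values.
            config.setProperty("transforms.unwrap.delete.tombstone.handling.mode", "rewrite");
        }
    }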

    Support for MongoDB 4.4 is deprecated (DBZ-6881).

    New features

    • Provide first class support for MariaDB DBZ-2913

    • Support for IBM Informix DBZ-4999

    • Add support for honouring MongoDB read preference in change stream after promotion DBZ-5953

    • Enable Spanner Connector against Cloud Spanner Emulator DBZ-6845

    • Refactor Oracle streaming metrics beans DBZ-6899

    • Provide capability to set image pull secrets in DS k8s CRD DBZ-6962

    • Upgrade to Vitess 17 for integration tests DBZ-6981

    • Add the ability to sanitize field name when inferencing json schema DBZ-6983

    • Allow OLM Bundle scripts to download from maven central by default DBZ-6995

    • Enhance README.md with Instructions for Creating a Kubernetes Namespace DBZ-7004

    • Support OKD/Openshift catalog in OH release script DBZ-7010

    • Add displayName and description metadata to DebeziumServer CRD in OLM Bundle DBZ-7011

    • Upgrade Kafka to 3.6.0 DBZ-7033

    • DebeziumConnector always attempts to contact Quay.io to determine latest stable version DBZ-7044

    • Support snapshot with automatic retry DBZ-7050

    • Provide resources to set pod requests and limits in DS k8s CRD DBZ-7052

    • Provide svc to better collect dbz-server metrics in DS k8s DBZ-7053

    • Improve logging at DEBUG level for Commit events DBZ-7067

    • Replace schema tracking restriction for SYS/SYSTEM users with configurable option DBZ-7071

    Fixes

    • Multiple debezium:offsets Redis clients DBZ-6952

    • Wrong case-behavior for non-avro column name in sink connector DBZ-6958

    • Handle properly bytea field for jdbc sink to postgresql DBZ-6967

    • Debezium jdbc sink process truncate event failure DBZ-6970

    • Single quote replication includes escaped quotes for N(CHAR/VARCHAR) columns DBZ-6975

    • Debezium jdbc sink should throw not supporting schema change topic exception DBZ-6990

    • Debezium doesn’t compile with JDK 21 DBZ-6992

    • OLM bundle version for GA releases is invalid DBZ-6994

    • ALTER TABLE fails when adding multiple columns to JDBC sink target DBZ-6999

    • Invalid Link to zulip chat in CSV metadata DBZ-7000

    • Make sure to terminate the task once connectivity is lost to either the rebalance or sync topic DBZ-7001

    • Missing .metadata.annotations.repository field in CSV metadata DBZ-7003

    • Single quote replication and loss of data DBZ-7006

    • Oracle connector: Payload size over 76020 bytes are getting truncated DBZ-7018

    • DDL statement couldn’t be parsed DBZ-7030

    • Blocking ad-hoc snapshot is not really blocking for MySQL DBZ-7035

    • Fake ROTATE event on connection restart cleans metadata DBZ-7037

    Other changes

    • Adding Debezium Server example using MySQL and GCP PubSub DBZ-4471

    • Test Debezium against MSSQL 2016 DBZ-6693

    • Test Debezium against DB2 1.5.8.0 DBZ-6694

    • Add MSSQL 2022 to test matrix DBZ-6695

    • Edit test matrix after team evaluation DBZ-6696

    • Edit test automation to run both DB2 1.5.8.0 and 1.5.0.0a DBZ-6697

    • Refactor ElapsedTimeStrategy DBZ-6778

    • Provide configuration option to exclude extension attributes from a CloudEvent DBZ-6982

    • Further refactoring to correct downstream rendering of incremental snapshots topics DBZ-6997

    • Remove deprecated embedded engine code DBZ-7013

    • Enable replication slot advance check DBZ-7015

    • Add configuration option to CloudEventsConverter to retrieve id and type from headers DBZ-7016

    • Use optional schema for Timezone Converter tests DBZ-7020

    • Debezium Operator blogpost DBZ-7025

    • Apply 2.3.4 updates to main branch DBZ-7039

    • Update documentation with Postgres’s pgoutput limitation DBZ-7041

    • Use oracle container registry for MySQL images DBZ-7042

    • Updates to fix build of downstream doc DBZ-7046

    • Update operator dependencies and add qosdk platform bom DBZ-7048

    • Upgrade maven-surefire-plugin to 3.1.2 DBZ-7055

    • Consolidate resource labels and annotations DBZ-7064

    • Disable time sync in Testing farm test runs DBZ-7074

    \ No newline at end of file diff --git a/releases/2.6/index.html b/releases/2.6/index.html index 53bf96dc0f..b4cf70200a 100644 --- a/releases/2.6/index.html +++ b/releases/2.6/index.html @@ -1 +1 @@ - Debezium Release Series 2.6

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 8.0.x, 8.2
    Driver: 8.0.33
    MariaDB Database: 11.1.2
    Driver: 3.2.0
    MongoDB Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 10, 11, 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.6.0.Beta1

    2024-03-06
    Db2 for iSeries connector; Kafka 3.7.0; Faster multi-column key incremental snapshots; Metadata in watermarking signals; Redo SQL in Oracle change events metadata; New micro/nano-second precision source event timestamp; Arbitrary payload for outbox on Debezium Server; Snapshotter SPI and unified snapshot modes for all core connectors

    2.6.0.Alpha2

    2024-02-13
    New asynchronous implementation of Debezium Engine and Server; Refactored snapshotting to unify snapshot modes; Java 17 as compile-time dependency; Timezone conversion SMT applicable to metadata field; MongoDB provides additional metrics, UUID support as document key in incremental snapshot, post-images support and SSL configuration; Alternative SQL Server query mode for improved performance; Reduced Debezium Server image size; `DECFLOAT` datatype supported in Db2

    2.6.0.Alpha1

    2024-01-18
    Removal of `replica_set` mode for MongoDB connector; Notification improvements; CloudEvents schema naming customization; Support for `NEW_ROW_AND_OLD_VALUES` capture mode in Spanner connector; Support for service loader classloading mode in Kafka Connect; DDL grammar fixes; Upgrade to Groovy 4; Fixed retrying logic for Debezium Engine
    \ No newline at end of file + Debezium Release Series 2.6

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 8.0.x, 8.2
    Driver: 8.0.33
    MariaDB Database: 11.1.2
    Driver: 3.2.0
    MongoDB Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 10, 11, 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.6.0.Beta1

    2024-03-06
    Db2 for iSeries connector; Kafka 3.7.0; Faster multi-column key incremental snapshots; Metadata in watermarking signals; Redo SQL in Oracle change events metadata; New micro/nano-second precision source event timestamp; Arbitrary payload for outbox on Debezium Server; Snapshotter SPI and unified snapshot modes for all core connectors

    2.6.0.Alpha2

    2024-02-13
    New asynchronous implementation of Debezium Engine and Server; Refactored snapshotting to unify snapshot modes; Java 17 as compile-time dependency; Timezone conversion SMT applicable to metadata field; MongoDB provides additional metrics, UUID support as document key in incremental snapshot, post-images support and SSL configuration; Alternative SQL Server query mode for improved performance; Reduced Debezium Server image size; `DECFLOAT` datatype supported in Db2

    2.6.0.Alpha1

    2024-01-18
    Removal of `replica_set` mode for MongoDB connector; Notification improvements; CloudEvents schema naming customization; Support for `NEW_ROW_AND_OLD_VALUES` capture mode in Spanner connector; Support for service loader classloading mode in Kafka Connect; DDL grammar fixes; Upgrade to Groovy 4; Fixed retrying logic for Debezium Engine
    \ No newline at end of file diff --git a/releases/2.6/release-notes.html b/releases/2.6/release-notes.html index 04fd76b42a..38399e3aa1 100644 --- a/releases/2.6/release-notes.html +++ b/releases/2.6/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 2.6

    Release Notes for Debezium 2.6

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.6.2.Final (May 30th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Debezium Oracle connector needs to support IN clause for LogMiner query for more than 1000 tables as it creates a performance issue DBZ-7847

    Fixes

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • old class reference in ibmi-connector services DBZ-7795

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

    • Debezium JDBC Sink not handle order correctly DBZ-7830

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    • Default value of error retries not interpreted correctly DBZ-7870

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • ParsingException (MySQL 8): create trigger if exists DBZ-7881

    • Debezium can’t handle columns with # in its name DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    Other changes

    • Add c3p0 timeout configuration example to JDBC sink DBZ-7822

    • In the Cassandra documentation, there is a typo: it should have been "disable", not "Dusable". DBZ-7851

    • Too many logs after Debezium update DBZ-7871

    Release 2.6.1.Final (April 12th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Introduce RawToString transform for converting GUIDs stored in Oracle RAW(16) columns to Guid string DBZ-7753

    Fixes

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    Other changes

    There are no other changes in this release.

    Release 2.6.0.Final (April 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add documentation for Cassandra connector event.order.guarantee.mode property DBZ-7720

    Fixes

    • JDBC Storage does not support connection recovery DBZ-7258

    • Full incremental snapshot on SQL Server Table skipping block of 36 records DBZ-7359

    • Snapshot skipping records DBZ-7585

    • AsyncEmbeddedEngine doesn’t shut down threads properly DBZ-7661

    • RedisSchemaHistoryIT fails randomly DBZ-7692

    • RedisSchemaHistoryIT#testRedisConnectionRetry can run into infinite retry loop DBZ-7701

    • Fix system tests error when using Kafka 3.6.0 or less. DBZ-7708

    • Adjust fakeDNS starting to work both on Docker Desktop and Podman Desktop DBZ-7711

    • Fix mysql and postgresql system test assert failures DBZ-7713

    • Fix errors when running system testsuite with mysql and jdbc tests together DBZ-7714

    • whitespace in filename of debezium-connector-ibmi DBZ-7721

    Other changes

    • Create Debezium design document for new implementation of DebeziumEngine DBZ-7073

    • Provide a generic snapshot mode configurable via connector properties DBZ-7497

    • Create JDBC sink connector system tests DBZ-7592

    • Bump up versions of dependencies in system testsuite DBZ-7630

    • Align snapshot modes for Informix DBZ-7699

    • Add tag for system test mongodb sharded replica_set mode DBZ-7706

    • Remove unneeded records copying from RecordProcessors DBZ-7710

    • Example-mongodb image - fix init script for images with base mongo:6.0 DBZ-7712

    • Remove dependency for mysql-connector test-jar in Redis tests DBZ-7723

    Release 2.6.0.CR1 (March 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    SQL Server by mistake did not honor the store.only.captured.tables setting on the first connector start. This is rectified, and the connector now by default takes a snapshot of all table schemas (DBZ-7593).

    The Vitess connector originally used the timestamp of the BEGIN message as the source timestamp. It now uses the COMMIT timestamp to match the behaviour of the other connectors (DBZ-7628).

    The Debezium MySQL connector is upgraded to the 8.3.0 JDBC driver. This driver is no longer compatible with MySQL 5. If you still need to use older MySQL versions, please downgrade the driver after installation (DBZ-7652).

    New features

    • Add XML support for OpenLogReplicator DBZ-6896

    • Use TRACE level log for Debezium Server in build time DBZ-7369

    • Implement Versioned interfaces in Transformation and Converter plugins DBZ-7618

    • Performance Issue in Cassandra Connector DBZ-7622

    • Provide partition mode to guarantee order of events in same partition DBZ-7631

    • Support empty debezium.sink.redis.user and debezium.sink.redis.password DBZ-7646

    Fixes

    • Log Mining Processor advances SCN incorrectly if LogMiner query returns no rows DBZ-6679

    • Oracle connector unable to find SCN after Exadata maintenance updates DBZ-7389

    • Oracle LOB requery on Primary Key change does not work for all column types DBZ-7458

    • Incorrect value of TIME(n) replicate from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Enhanced event timestamp precision combined with ExtractNewRecordState not working DBZ-7615

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • JDBC connector does not process ByteBuffer field value DBZ-7620

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    • Missing test annotation in PostgresConnectorIT DBZ-7649

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    • Duplicate envar generated in operator bundle DBZ-7703

    Other changes

    • debezium-connector-jdbc occurred java.sql.SQLException: ORA-01461: can bind a LONG value only DBZ-6900

    • Align snapshot modes for MongoDB DBZ-7304

    • Align snapshot modes for DB2 DBZ-7305

    • Align all snapshot mode on all connectors DBZ-7308

    • Remove LogMiner continuous mining configuration option DBZ-7610

    • Update Quarkus Outbox to Quarkus 3.8.2 DBZ-7623

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • MongoDbReplicaSet and MongoDbShardedCluster should not create a new network for each builder instance by default DBZ-7626

    • Remove forgotten lombok code from system tests DBZ-7634

    • Add JDBC connector to artifact server image preparation DBZ-7644

    • Revert removal of Oracle LogMiner continuous mining DBZ-7645

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Fix MySQL image fetch for tests DBZ-7651

    • RedisSchemaHistoryIT continually fails DBZ-7654

    • Upgrade Quarkus Outbox Extension to Quarkus 3.8.3 DBZ-7656

    • Bump SQL Server test image to SQL Server 2022 DBZ-7657

    • Upgrade Debezium Server to Quarkus 3.2.11.Final DBZ-7662

    • Exclude jcl-over-slf4j dependency DBZ-7665

    Release 2.6.0.Beta1 (March 6th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Debezium Oracle connector previously required manual installation of the Oracle JDBC driver. This is no longer needed, as the driver is now packaged with the connector (DBZ-7364).

    The handling of the MAVEN_DEP_DESTINATION environment variable has changed in the connect-base container image. It is no longer used for downloading all dependencies, including connectors, but only for general-purpose dependencies located in Maven Central (DBZ-7551).

    New features

    • DB2/AS400 CDC using free jt400 library DBZ-2002

    • Use row value constructors to speed up multi-column queries for incremental snapshots DBZ-5071

    • Add metadata to watermarking signals DBZ-6858

    • Provide the Redo SQL as part of the change event DBZ-6960

    • Introduce a new microsecond/nanosecond precision timestamp in envelope DBZ-7107

    • Append LSN to txID DBZ-7454

    • Defer transaction capture until the first DML event occurs DBZ-7473

    • Support arbitrary payloads with outbox event router on debezium server DBZ-7512

    • Allow XStream error ORA-23656 to be retried DBZ-7559

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Improved logging in case of PostgreSQL failure DBZ-7581

    Fixes

    • PostgreSQL connector doesn’t restart properly if the database is not reachable DBZ-6236

    • NullPointerException in MongoDB connector DBZ-6434

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Callout annotations rendered multiple times in downstream User Guide DBZ-7418

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Allow special characters in signal table name DBZ-7480

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Db2ReselectColumnsProcessorIT does not clean-up after test failures DBZ-7491

    • Completion callback called before connector stop DBZ-7496

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Remove incubating from Debezium documentation DBZ-7501

    • LogMinerHelperIT test shouldAddCorrectLogFiles randomly fails DBZ-7504

    • MySQL ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal fails randomly DBZ-7508

    • Multi-threaded snapshot can enqueue changes out of order DBZ-7534

    • AsyncEmbeddedEngineTest#testTasksAreStoppedIfSomeFailsToStart fails randomly DBZ-7535

    • MongoDbReplicaSetAuthTest fails randomly DBZ-7537

    • ReadOnlyIncrementalSnapshotIT#testStopSnapshotKafkaSignal fails randomly DBZ-7553

    • Wait for Redis server to start DBZ-7564

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    • AsyncEmbeddedEngineTest.testExecuteSmt fails randomly DBZ-7568

    • Debezium fails to compile with JDK 21 DBZ-7569

    • Redis tests fail randomly with JedisConnectionException: Unexpected end of stream DBZ-7576

    • RedisOffsetIT.testRedisConnectionRetry fails randomly DBZ-7578

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Oracle Connector REST Extension Tests Fail DBZ-7597

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    Other changes

    • MySQL config values validated twice DBZ-2015

    • Implement Hybrid Mining Strategy for Oracle, seamless DDL tracking with online catalog performance DBZ-3401

    • Tests in RHEL system testsuite throw errors without ocp cluster DBZ-7002

    • Move timeout configuration of MongoDbReplicaSet into Builder class DBZ-7054

    • Several Oracle tests fail regularly on Testing Farm infrastructure DBZ-7072

    • Remove obsolete MySQL version from TF DBZ-7173

    • Add Oracle 23 to CI test matrix DBZ-7195

    • Refactor sharded mongo ocp test DBZ-7221

    • Implement Snapshotter SPI Oracle DBZ-7302

    • Align snapshot modes for SQLServer DBZ-7303

    • Update snapshot mode documentation DBZ-7309

    • Upgrade ojdbc8 to 21.11.0.0 DBZ-7365

    • Document relation between column type and serializers for outbox DBZ-7368

    • Test testEmptyChangesProducesHeartbeat tends to fail randomly DBZ-7453

    • Align snapshot modes for PostgreSQL, MySQL, Oracle DBZ-7461

    • Document toggling MariaDB mode DBZ-7487

    • Add informix to main repository CI workflow DBZ-7490

    • Disable Oracle Integration Tests on GitHub DBZ-7494

    • Unify and adjust thread time outs DBZ-7495

    • Add "IF [NOT] EXISTS" DDL support for Oracle 23 DBZ-7498

    • Deployment examples show attribute name instead of its value DBZ-7499

    • Add ability to parse Map<String, Object> into ConfigProperties DBZ-7503

    • Support Oracle 23 SELECT without FROM DBZ-7505

    • Add Oracle 23 Annotation support for CREATE/ALTER TABLE statements DBZ-7506

    • TestContainers MongoDbReplicaSetAuthTest randomly fails DBZ-7507

    • Add Informix to Java Outreach DBZ-7510

    • Disable parallel record processing in DBZ server tests against Apicurio DBZ-7515

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Remove the unused 'connector' parameter in the createSourceTask method in EmbeddedEngine.java DBZ-7517

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Allow to download containers also from Docker Hub DBZ-7524

    • Update rocketmq version DBZ-7525

    • signalLogWithEscapedCharacter fails with pgoutput-decoder DBZ-7526

    • Move RocketMQ dependency to debezium server DBZ-7527

    • Rework shouldGenerateSnapshotAndContinueStreaming assertions to deal with parallelization DBZ-7530

    • SQLServer tests taking long time due to database bad state DBZ-7541

    • Explicitly import jakarta dependencies that are excluded via glassfish filter DBZ-7545

    • Include RocketMQ and Redis container output into test log DBZ-7557

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Upgrade Kafka to 3.7.0 DBZ-7574

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Reduce debug logs on tests DBZ-7588

    • Server SQS sink doesn’t support quick profile DBZ-7590

    Release 2.6.0.Alpha2 (February 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    In 2.6, the task config format for the Vitess connector was changed because the previous approach could destabilize the Kafka Connect cluster (DBZ-7250). In some cases, upgrading will cause a NullPointerException and the error Couldn’t instantiate task <task-id> because it has an invalid task configuration. This task will not execute until reconfigured. To fix this, delete and recreate each connector using the same name and configuration. The connectors will start up and reuse the last stored offsets because they use the same connector name, but they will not try to reuse the old task configs that caused the error.

    New features

    • Add Number of records captured and processed as metrics for Debezium MongoDB Connector DBZ-6432

    • Add timezone conversion to metadata in Timezone Converter SMT DBZ-7022

    • Create new implementation of DebeziumEngine DBZ-7024

    • Error when fail converting value with internal schema DBZ-7143

    • Provide alternative direct query for faster execution DBZ-7273

    • MongoDb connector doesn’t use post-images DBZ-7299

    • Support DECFLOAT in Db2 connector DBZ-7362

    • Create PubSub example for DS deployed via operator DBZ-7370

    • Support connector scoped truststore/keystore for MongoDB DBZ-7379

    • Put transaction id in offsets only when it’s present DBZ-7380

    • Replace additional rolebinding definition in kubernetes.yml with @RBACRule DBZ-7381

    • Reduce size of docker image for Debezium 2.6 and up DBZ-7385

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Need to be able to set an ordering key value DBZ-7435

    • Evaluate container image size for Debezium UI served by nginx DBZ-7447

    • Support UUID as document key for incremental snapshotting DBZ-7451

    • Consolidate version management DBZ-7455

    Fixes

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Debezium fails after table split operation DBZ-7360

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Fix mysql version in mysql-replication container images DBZ-7384

    • Duplicate Debezium SMT transform DBZ-7416

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Stopwatch throws NPE when toString is called without having statistics DBZ-7436

    • ReselectColumnsPostProcessor filter not use exclude predicate DBZ-7437

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • LogMiner batch size does not increase automatically DBZ-7445

    • Reduce string creation during SQL_REDO column read DBZ-7446

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • SQL Server queries with special characters fail after applying DBZ-7273 DBZ-7463

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Postgres images require clang-11 DBZ-7475

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • Snapshotter SPI wrongly loaded on Debezium Server DBZ-7481

    Other changes

    • Remove obsolete MySQL version from TF DBZ-7173

    • Correctly handle METADATA records DBZ-7176

    • Move Snapshotter interface to core module as SPI DBZ-7300

    • Implement Snapshotter SPI MySQL/MariaDB DBZ-7301

    • Update the Debezium UI repo with local development infra and readme file. DBZ-7353

    • Update QOSDK to the latest version DBZ-7361

    • Upstream artefact server image preparation job failing DBZ-7371

    • Tests in RHEL system testsuite fail to initialize Kafka containers DBZ-7373

    • Fix logging for schema only recovery mode in mysql connector DBZ-7376

    • Records from snapshot delivered out of order DBZ-7382

    • Upgrade json-path to 2.9.0 DBZ-7383

    • Remove the use of Lombok in Debezium testsuite DBZ-7386

    • Use Java 17 as compile-time dependency DBZ-7387

    • Upgrade Outbox Extension to Quarkus 3.7.0 DBZ-7388

    • Add dependency update bot to the UI Repo DBZ-7392

    • Fix the unit test cases DBZ-7423

    • Adopt Oracle 23 to Testing Farm DBZ-7439

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • Upgrade Quarkus for Debezium Server to 3.2.9.Final DBZ-7449

    • Fix TimescaleDbDatabaseTest to run into test container DBZ-7452

    • Upgrade example-mongo image version to 6.0 DBZ-7457

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    Release 2.6.0.Alpha1 (January 18th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MongoDB no longer supports replica_set connection mode (DBZ-7260).

    The re-select columns post-processor used the key defined by message.key.columns to build its query. This is not correct for most tables with a primary key. The default behaviour has changed so that the table’s primary key is now used by default, and a new configuration option was introduced to let the user choose whether the primary key or the generated key should be used (DBZ-7358).

    New features

    • Provide a public API from the connector implementations to retrieve the list of matching collections or tables based on the different include-/exclude lists DBZ-7167

    • Notifications are Missing the ID field in log channel DBZ-7249

    • Provide config option to customize CloudEvents.data schema name DBZ-7284

    • Clarify comment on serialization of document ids DBZ-7287

    • Unittest for hasCommitAlreadyBeenHandled in CommitScn Class DBZ-7288

    • Oracle Infinispan abandoned transactions minor enhancements DBZ-7313

    • Add support for NEW_ROW_AND_OLD_VALUES value capture type. DBZ-7348

    Fixes

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (subquery with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • Enhance Oracle’s CREATE TABLE for Multiple Table Specifications DBZ-7286

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • errors.max.retries is not used to stop retrying DBZ-7342

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • MySqlJdbcSinkDataTypeConverterIT#testBooleanDataTypeMapping fails DBZ-7355

    Other changes

    • Add service loader manifests for all Connect plugins DBZ-7298

    • Update Groovy version to 4.x DBZ-7340

    • Upgrade Antora to 3.1.7 DBZ-7344

    • Upgrade Outbox Extension to Quarkus 3.6.5 DBZ-7352

    \ No newline at end of file + Release Notes for Debezium 2.6

    Release Notes for Debezium 2.6

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.6.2.Final (May 30th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.
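
    For Kafka Connect deployments, the stop-and-restart portion of this procedure can be scripted against the Connect REST API. The sketch below is illustrative only: it assumes a worker listening on localhost:8083 and a connector named inventory-connector, and the plugin files themselves still need to be replaced on each worker host (followed by a worker restart) between the pause and resume calls.

        # Minimal sketch of driving a connector upgrade via the Kafka Connect REST API.
        # Assumptions: worker at http://localhost:8083, connector named "inventory-connector".
        import json
        import urllib.request

        CONNECT = "http://localhost:8083"
        NAME = "inventory-connector"

        def call(method, path):
            req = urllib.request.Request(f"{CONNECT}{path}", method=method)
            with urllib.request.urlopen(req) as resp:
                body = resp.read()
                return json.loads(body) if body else None

        # 1. Gracefully stop the running connector before touching the plugin files.
        call("PUT", f"/connectors/{NAME}/pause")

        # 2. Out of band: remove the old plugin files, install the 2.6.2.Final files,
        #    and restart the Connect workers so the new plugin version is picked up.

        # 3. Resume the connector with the same configuration and verify its state.
        call("PUT", f"/connectors/{NAME}/resume")
        print(call("GET", f"/connectors/{NAME}/status"))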

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Debezium Oracle connector needs to support an IN clause in the LogMiner query for more than 1000 tables, as it creates a performance issue DBZ-7847

    Fixes

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • NO_DATA snapshot mode validation throws DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • old class reference in ibmi-connector services DBZ-7795

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

    • Debezium JDBC Sink does not handle order correctly DBZ-7830

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    • Default value of error retries not interpreted correctly DBZ-7870

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • ParsingException (MySQL 8): create trigger if exists DBZ-7881

    • Debezium can’t handle columns with # in their names DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    Other changes

    • Add c3p0 timeout configuration example to JDBC sink DBZ-7822

    • In the Cassandra documentation, there is a typo: it should be "disable", not "Dusable" DBZ-7851

    • Too many logs after Debezium update DBZ-7871

    Release 2.6.1.Final (April 12th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Introduce RawToString transform for converting GUIDs stored in Oracle RAW(16) columns to Guid string DBZ-7753

    Fixes

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    Other changes

    There are no other changes in this release.

    Release 2.6.0.Final (April 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add documentation for Cassandra connector event.order.guarantee.mode property DBZ-7720

    Fixes

    • JDBC Storage does not support connection recovery DBZ-7258

    • Full incremental snapshot on SQL Server Table skipping block of 36 records DBZ-7359

    • Snapshot skipping records DBZ-7585

    • AsyncEmbeddedEngine doesn’t shut down threads properly DBZ-7661

    • RedisSchemaHistoryIT fails randomly DBZ-7692

    • RedisSchemaHistoryIT#testRedisConnectionRetry can run into infinite retry loop DBZ-7701

    • Fix system tests error when using Kafka 3.6.0 or less. DBZ-7708

    • Adjust fakeDNS starting to work both on Docker Desktop and Podman Desktop DBZ-7711

    • Fix mysql and postgresql system test assert failures DBZ-7713

    • Fix errors when running system testsuite with mysql and jdbc tests together DBZ-7714

    • Whitespace in filename of debezium-connector-ibmi DBZ-7721

    Other changes

    • Create Debezium design document for new implementation of DebeziumEngine DBZ-7073

    • Provide a generic snapshot mode configurable via connector properties DBZ-7497

    • Create JDBC sink connector system tests DBZ-7592

    • Bump up versions of dependencies in system testsuite DBZ-7630

    • Align snapshot modes for Informix DBZ-7699

    • Add tag for system test mongodb sharded replica_set mode DBZ-7706

    • Remove unneeded records copying from RecordProcessors DBZ-7710

    • Example-mongodb image - fix init script for images with base mongo:6.0 DBZ-7712

    • Remove dependency for mysql-connector test-jar in Redis tests DBZ-7723

    Release 2.6.0.CR1 (March 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The SQL Server connector by mistake did not honor the store.only.captured.tables setting on the first connector start. This has been rectified, and the connector now takes a snapshot of all table schemas by default (DBZ-7593).
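
    If you prefer the previous behaviour and want only the schemas of captured tables stored, the option can still be set explicitly. Below is a minimal, illustrative configuration fragment shown as a Python dict; the connector name and table list are placeholders, not values from this release.

        # Illustrative fragment of a SQL Server connector registration (values are placeholders).
        connector = {
            "name": "sqlserver-connector",
            "config": {
                "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
                "table.include.list": "dbo.customers",
                # Store DDL only for the captured tables instead of all table schemas:
                "schema.history.internal.store.only.captured.tables.ddl": "true",
            },
        }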

    The Vitess connector originally used the timestamp of the BEGIN message as the source timestamp. This has been changed to use the COMMIT timestamp, reflecting the behaviour of other connectors (DBZ-7628).

    The Debezium MySQL connector has been upgraded to the 8.3.0 JDBC driver. This driver is no longer compatible with MySQL 5. If you still need to use older MySQL versions, please downgrade the driver after installation (DBZ-7652).
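
    As a rough illustration of such a downgrade, the sketch below fetches an older MySQL Connector/J from Maven Central and swaps it into the connector plugin directory. The plugin path and the target driver version are assumptions; adjust them to your installation and restart the Connect worker afterwards.

        # Illustrative sketch: swap the bundled MySQL JDBC driver for an older one.
        # Assumptions: the plugin directory and driver version below are placeholders.
        import pathlib
        import urllib.request

        plugin_dir = pathlib.Path("/kafka/connect/debezium-connector-mysql")
        old_version = "8.0.33"
        url = ("https://repo1.maven.org/maven2/com/mysql/mysql-connector-j/"
               f"{old_version}/mysql-connector-j-{old_version}.jar")

        # Remove the bundled 8.3.0 driver jar from the plugin directory.
        for jar in plugin_dir.glob("mysql-connector-j-8.3.0*.jar"):
            jar.unlink()

        # Download the older driver next to the connector jars.
        urllib.request.urlretrieve(url, str(plugin_dir / f"mysql-connector-j-{old_version}.jar"))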

    New features

    • Add XML support for OpenLogReplicator DBZ-6896

    • Use TRACE level log for Debezium Server in build time DBZ-7369

    • Implement Versioned interfaces in Transformation and Converter plugins DBZ-7618

    • Performance Issue in Cassandra Connector DBZ-7622

    • Provide partition mode to guarantee order of events in same partition DBZ-7631

    • Support empty debezium.sink.redis.user and debezium.sink.redis.password DBZ-7646

    Fixes

    • Log Mining Processor advances SCN incorrectly if LogMiner query returns no rows DBZ-6679

    • Oracle connector unable to find SCN after Exadata maintenance updates DBZ-7389

    • Oracle LOB requery on Primary Key change does not work for all column types DBZ-7458

    • Incorrect value of TIME(n) replicated from MySQL if the original value is negative DBZ-7594

    • Re-select Post Processor not working for complex types DBZ-7596

    • Null instead of toast placeholder written for binary types when "hex" mode configured DBZ-7599

    • Poor snapshot performance during schema snapshot DDL processing DBZ-7608

    • Re-select post processor performance DBZ-7611

    • Uncaught exception during config validation in Engine DBZ-7614

    • Enhanced event timestamp precision combined with ExtractNewRecordState not working DBZ-7615

    • Incremental snapshot query doesn’t honor message.key.columns order DBZ-7617

    • Metric ScnFreezeCount never increases DBZ-7619

    • JDBC connector does not process ByteBuffer field value DBZ-7620

    • Cassandra can have misaligned Jackson dependencies DBZ-7629

    • Numeric value without mantissa cannot be parsed DBZ-7643

    • Missing test annotation in PostgresConnectorIT DBZ-7649

    • Update QOSDK and Quarkus to fix vcs-url annotation CVE DBZ-7664

    • MySQL connector fails to parse DDL with RETURNING keyword DBZ-7666

    • Schema history comparator doesn’t handle SERVER_ID_KEY and TIMESTAMP_KEY properly DBZ-7690

    • Duplicate envar generated in operator bundle DBZ-7703

    Other changes

    • debezium-connector-jdbc occurred java.sql.SQLException: ORA-01461: can bind a LONG value only DBZ-6900

    • Align snapshot modes for MongoDB DBZ-7304

    • Align snapshot modes for DB2 DBZ-7305

    • Align all snapshot mode on all connectors DBZ-7308

    • Remove LogMiner continuous mining configuration option DBZ-7610

    • Update Quarkus Outbox to Quarkus 3.8.2 DBZ-7623

    • Upgrade Debezium Server to Quarkus 3.2.10 DBZ-7624

    • MongoDbReplicaSet and MongoDbShardedCluster should not create a new network for each builder instance by default DBZ-7626

    • Remove forgotten lombok code from system tests DBZ-7634

    • Add JDBC connector to artifact server image preparation DBZ-7644

    • Revert removal of Oracle LogMiner continuous mining DBZ-7645

    • Add documentation for MongoDB capture.mode.full.update.type property DBZ-7647

    • Fix MySQL image fetch for tests DBZ-7651

    • RedisSchemaHistoryIT continually fails DBZ-7654

    • Upgrade Quarkus Outbox Extension to Quarkus 3.8.3 DBZ-7656

    • Bump SQL Server test image to SQL Server 2022 DBZ-7657

    • Upgrade Debezium Server to Quarkus 3.2.11.Final DBZ-7662

    • Exclude jcl-over-slf4j dependency DBZ-7665

    Release 2.6.0.Beta1 (March 6th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Debezium Oracle connector previously required manual installation of the Oracle JDBC driver. This is no longer needed, as the driver is now packaged with the connector (DBZ-7364).

    The handling of the MAVEN_DEP_DESTINATION environment variable has changed in the connect-base container image. It is no longer used for downloading all dependencies, including connectors, but only for general-purpose dependencies located in Maven Central (DBZ-7551).

    New features

    • DB2/AS400 CDC using free jt400 library DBZ-2002

    • Use row value constructors to speed up multi-column queries for incremental snapshots DBZ-5071

    • Add metadata to watermarking signals DBZ-6858

    • Provide the Redo SQL as part of the change event DBZ-6960

    • Introduce a new microsecond/nanosecond precision timestamp in envelope DBZ-7107

    • Append LSN to txID DBZ-7454

    • Defer transaction capture until the first DML event occurs DBZ-7473

    • Support arbitrary payloads with outbox event router on debezium server DBZ-7512

    • Allow XStream error ORA-23656 to be retried DBZ-7559

    • Upgrade PostgreSQL driver to 42.6.1 DBZ-7571

    • Improved logging in case of PostgreSQL failure DBZ-7581

    Fixes

    • PostgreSQL connector doesn’t restart properly if the database is not reachable DBZ-6236

    • NullPointerException in MongoDB connector DBZ-6434

    • Cassandra-4: Debezium connector stops producing events after a schema change DBZ-7363

    • Callout annotations rendered multiple times in downstream User Guide DBZ-7418

    • PreparedStatement leak in Oracle ReselectColumnsProcessor DBZ-7479

    • Allow special characters in signal table name DBZ-7480

    • Poor snapshot performance with new reselect SMT DBZ-7488

    • Debezium Oracle Connector ParsingException on XMLTYPE with lob.enabled=true DBZ-7489

    • Db2ReselectColumnsProcessorIT does not clean-up after test failures DBZ-7491

    • Completion callback called before connector stop DBZ-7496

    • Fix MySQL 8 event timestamp resolution logic error where fallback to seconds occurs erroneously for non-GTID events DBZ-7500

    • Remove incubating from Debezium documentation DBZ-7501

    • LogMinerHelperIT test shouldAddCorrectLogFiles randomly fails DBZ-7504

    • MySQL ReadOnlyIncrementalSnapshotIT testStopSnapshotKafkaSignal fails randomly DBZ-7508

    • Multi-threaded snapshot can enqueue changes out of order DBZ-7534

    • AsyncEmbeddedEngineTest#testTasksAreStoppedIfSomeFailsToStart fails randomly DBZ-7535

    • MongoDbReplicaSetAuthTest fails randomly DBZ-7537

    • ReadOnlyIncrementalSnapshotIT#testStopSnapshotKafkaSignal fails randomly DBZ-7553

    • Wait for Redis server to start DBZ-7564

    • Fix null event timestamp possible from FORMAT_DESCRIPTION and PREVIOUS_GTIDS events in MySqlStreamingChangeEventSource::setEventTimestamp DBZ-7567

    • AsyncEmbeddedEngineTest.testExecuteSmt fails randomly DBZ-7568

    • Debezium fails to compile with JDK 21 DBZ-7569

    • Redis tests fail randomly with JedisConnectionException: Unexpected end of stream DBZ-7576

    • RedisOffsetIT.testRedisConnectionRetry fails randomly DBZ-7578

    • Unavailable Toasted HSTORE Json Storage Mode column causes serialization failure DBZ-7582

    • Oracle Connector REST Extension Tests Fail DBZ-7597

    • Serialization of XML columns with NULL values fails using Infinispan Buffer DBZ-7598

    Other changes

    • MySQL config values validated twice DBZ-2015

    • Implement Hybrid Mining Strategy for Oracle, seamless DDL tracking with online catalog performance DBZ-3401

    • Tests in RHEL system testsuite throw errors without ocp cluster DBZ-7002

    • Move timeout configuration of MongoDbReplicaSet into Builder class DBZ-7054

    • Several Oracle tests fail regularly on Testing Farm infrastructure DBZ-7072

    • Remove obsolete MySQL version from TF DBZ-7173

    • Add Oracle 23 to CI test matrix DBZ-7195

    • Refactor sharded mongo ocp test DBZ-7221

    • Implement Snapshotter SPI Oracle DBZ-7302

    • Align snapshot modes for SQLServer DBZ-7303

    • Update snapshot mode documentation DBZ-7309

    • Upgrade ojdbc8 to 21.11.0.0 DBZ-7365

    • Document relation between column type and serializers for outbox DBZ-7368

    • Test testEmptyChangesProducesHeartbeat tends to fail randomly DBZ-7453

    • Align snapshot modes for PostgreSQL, MySQL, Oracle DBZ-7461

    • Document toggling MariaDB mode DBZ-7487

    • Add informix to main repository CI workflow DBZ-7490

    • Disable Oracle Integration Tests on GitHub DBZ-7494

    • Unify and adjust thread time outs DBZ-7495

    • Add "IF [NOT] EXISTS" DDL support for Oracle 23 DBZ-7498

    • Deployment examples show attribute name instead of its value DBZ-7499

    • Add ability to parse Map<String, Object> into ConfigProperties DBZ-7503

    • Support Oracle 23 SELECT without FROM DBZ-7505

    • Add Oracle 23 Annotation support for CREATE/ALTER TABLE statements DBZ-7506

    • TestContainers MongoDbReplicaSetAuthTest randomly fails DBZ-7507

    • Add Informix to Java Outreach DBZ-7510

    • Disable parallel record processing in DBZ server tests against Apicurio DBZ-7515

    • Add Start CDC hook in Reselect Columns PostProcessor Tests DBZ-7516

    • Remove the unused 'connector' parameter in the createSourceTask method in EmbeddedEngine.java DBZ-7517

    • Update commons-compress to 1.26.0 DBZ-7520

    • Promote JDBC sink from Incubating DBZ-7521

    • Allow to download containers also from Docker Hub DBZ-7524

    • Update rocketmq version DBZ-7525

    • signalLogWithEscapedCharacter fails with pgoutput-decoder DBZ-7526

    • Move RocketMQ dependency to debezium server DBZ-7527

    • Rework shouldGenerateSnapshotAndContinueStreaming assertions to deal with parallelization DBZ-7530

    • SQLServer tests taking long time due to database bad state DBZ-7541

    • Explicitly import jakarta dependencies that are excluded via glassfish filter DBZ-7545

    • Include RocketMQ and Redis container output into test log DBZ-7557

    • Numeric default value decimal scale mismatch DBZ-7562

    • Documentation conflict DBZ-7565

    • Upgrade Kafka to 3.7.0 DBZ-7574

    • Oracle connector always brings OLR dependencies DBZ-7579

    • Correct JDBC connector dependencies DBZ-7580

    • Reduce debug logs on tests DBZ-7588

    • Server SQS sink doesn’t support quick profile DBZ-7590

    Release 2.6.0.Alpha2 (February 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    In 2.6, the task config format for the Vitess connector was changed because the previous approach could destabilize the Kafka Connect cluster (DBZ-7250). In some cases, upgrading will cause a NullPointerException and the error Couldn’t instantiate task <task-id> because it has an invalid task configuration. This task will not execute until reconfigured. To fix this, delete and recreate each connector using the same name and configuration. The connectors will start up and reuse the last stored offsets because they use the same connector name, but they will not try to reuse the old task configs that caused the error.
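
    A rough sketch of the delete-and-recreate step against the Kafka Connect REST API follows. The worker URL and connector name are placeholders; the existing configuration is read back before the connector is removed so that it can be re-submitted unchanged.

        # Illustrative sketch: recreate a Vitess connector with its existing name and config.
        # Assumptions: Connect worker at http://localhost:8083, connector named "vitess-connector".
        import json
        import urllib.request

        CONNECT = "http://localhost:8083"
        NAME = "vitess-connector"

        def call(method, path, payload=None):
            data = json.dumps(payload).encode() if payload is not None else None
            req = urllib.request.Request(
                f"{CONNECT}{path}", data=data, method=method,
                headers={"Content-Type": "application/json"},
            )
            with urllib.request.urlopen(req) as resp:
                body = resp.read()
                return json.loads(body) if body else None

        # Read the current configuration, delete the connector, then recreate it
        # under the same name so the previously stored offsets are reused.
        config = call("GET", f"/connectors/{NAME}/config")
        call("DELETE", f"/connectors/{NAME}")
        call("POST", "/connectors", {"name": NAME, "config": config})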

    New features

    • Add Number of records captured and processed as metrics for Debezium MongoDB Connector DBZ-6432

    • Add timezone conversion to metadata in Timezone Converter SMT DBZ-7022

    • Create new implementation of DebeziumEngine DBZ-7024

    • Error when fail converting value with internal schema DBZ-7143

    • Provide alternative direct query for faster execution DBZ-7273

    • MongoDb connector doesn’t use post-images DBZ-7299

    • Support DECFLOAT in Db2 connector DBZ-7362

    • Create PubSub example for DS deployed via operator DBZ-7370

    • Support connector scoped truststore/keystore for MongoDB DBZ-7379

    • Put transaction id in offsets only when it’s present DBZ-7380

    • Replace additional rolebinding definition in kubernetes.yml with @RBACRule DBZ-7381

    • Reduce size of docker image for Debezium 2.6 and up DBZ-7385

    • Allow the C3P0ConnectionProvider to be customized via configuration DBZ-7431

    • Need to be able to set an ordering key value DBZ-7435

    • Evaluate container image size for Debezium UI served by nginx DBZ-7447

    • Support UUID as document key for incremental snapshotting DBZ-7451

    • Consolidate version management DBZ-7455

    Fixes

    • Connector is getting stopped while processing bulk update(50k) records in debezium server 2.0.1.Final DBZ-6955

    • Debezium fails after table split operation DBZ-7360

    • Informix-Connector breaks on table with numerical default value DBZ-7372

    • MSSQL wrong default values in db schema for varchar, nvarchar, char columns DBZ-7374

    • Fix mysql version in mysql-replication container images DBZ-7384

    • Duplicate Debezium SMT transform DBZ-7416

    • Kinesis Sink Exception on PutRecord DBZ-7417

    • ParsingException (MariaDB Only): alterSpec drop foreign key with 'tablename.' prefix DBZ-7420

    • Poor performance with incremental snapshot with long list of tables DBZ-7421

    • Oracle Snapshot mistakenly uses LogMiner Offset Loader by default DBZ-7425

    • Reselect columns should source key values from after Struct when not using event-key sources DBZ-7429

    • Stopwatch throws NPE when toString is called without having statistics DBZ-7436

    • ReselectColumnsPostProcessor filter not use exclude predicate DBZ-7437

    • Adhoc snapshots are not triggered via File channel signal when submitted before the start of the application DBZ-7441

    • LogMiner batch size does not increase automatically DBZ-7445

    • Reduce string creation during SQL_REDO column read DBZ-7446

    • Oracle connector does not ignore reselection for excluded clob/blob columns DBZ-7456

    • The expected value pattern for table.include.list does not align with the documentation DBZ-7460

    • SQL Server queries with special characters fail after applying DBZ-7273 DBZ-7463

    • Signals actions are not loaded for SQLServer DBZ-7467

    • MySQL connector cannot parse table with WITH SYSTEM VERSIONING PARTITION BY SYSTEM_TIME DBZ-7468

    • Postgres images require clang-11 DBZ-7475

    • Make readiness and liveness probe timeouts configurable DBZ-7476

    • Snapshotter SPI wrongly loaded on Debezium Server DBZ-7481

    Other changes

    • Remove obsolete MySQL version from TF DBZ-7173

    • Correctly handle METADATA records DBZ-7176

    • Move Snapshotter interface to core module as SPI DBZ-7300

    • Implement Snapshotter SPI MySQL/MariaDB DBZ-7301

    • Update the Debezium UI repo with local development infra and readme file. DBZ-7353

    • Update QOSDK to the latest version DBZ-7361

    • Upstream artefact server image preparation job failing DBZ-7371

    • Tests in RHEL system testsuite fail to initialize Kafka containers DBZ-7373

    • Fix logging for schema only recovery mode in mysql connector DBZ-7376

    • Records from snapshot delivered out of order DBZ-7382

    • Upgrade json-path to 2.9.0 DBZ-7383

    • Remove the use of Lombok in Debezium testsuite DBZ-7386

    • Use Java 17 as compile-time dependency DBZ-7387

    • Upgrade Outbox Extension to Quarkus 3.7.0 DBZ-7388

    • Add dependency update bot to the UI Repo DBZ-7392

    • Fix the unit test cases DBZ-7423

    • Adopt Oracle 23 to Testing Farm DBZ-7439

    • Upgrade protobuf to 3.25.2 DBZ-7442

    • Correct debezium.sink.pubsub.flowcontrol.* variable names in Debezium Server docs site DBZ-7443

    • Upgrade Quarkus for Debezium Server to 3.2.9.Final DBZ-7449

    • Fix TimescaleDbDatabaseTest to run into test container DBZ-7452

    • Upgrade example-mongo image version to 6.0 DBZ-7457

    • Test Db2ReselectColumnsProcessorIT randomly fails DBZ-7471

    Release 2.6.0.Alpha1 (January 18th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.6.1 and has been tested with version 3.6.1 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.6.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.6.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.6.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    MongoDB no longer supports replica_set connection mode (DBZ-7260).

    The re-select columns post-processor used the key defined by message.key.columns to build its query. This is not correct for most tables with a primary key. The default behaviour has changed so that the table’s primary key is now used by default, and a new configuration option was introduced to let the user choose whether the primary key or the generated key should be used (DBZ-7358).
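
    For context, a post-processor of this kind is wired into the connector configuration roughly as sketched below. The table and column names are placeholders, and the new key-selection option mentioned above is configured under the same reselector prefix; its exact property name is not reproduced here, so consult the connector documentation for it.

        # Illustrative fragment of a connector configuration using the re-select post-processor.
        # Values are placeholders; check the connector documentation for the exact option names.
        reselect_config = {
            "post.processors": "reselector",
            "reselector.type": "io.debezium.processors.reselect.ReselectColumnsPostProcessor",
            # Columns to re-select from the source database when their values are unavailable:
            "reselector.reselect.columns.include.list": "inventory.products.description",
        }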

    New features

    • Provide a public API from the connector implementations to retrieve the list of matching collections or tables based on the different include-/exclude lists DBZ-7167

    • Notifications are Missing the ID field in log channel DBZ-7249

    • Provide config option to customize CloudEvents.data schema name DBZ-7284

    • Clarify comment on serialization of document ids DBZ-7287

    • Unittest for hasCommitAlreadyBeenHandled in CommitScn Class DBZ-7288

    • Oracle Infinispan abandoned transactions minor enhancements DBZ-7313

    • Add support for NEW_ROW_AND_OLD_VALUES value capture type. DBZ-7348

    Fixes

    • Empty object sent to GCP Pub/Sub after DELETE event DBZ-7098

    • Debezium-ddl-parser crashes on parsing MySQL DDL statement (subquery with UNION) DBZ-7259

    • Oracle DDL parsing error in PARTITION REFERENCE DBZ-7266

    • Enhance Oracle’s CREATE TABLE for Multiple Table Specifications DBZ-7286

    • PostgreSQL ad-hoc blocking snapshots fail when snapshot mode is "never" DBZ-7311

    • Ad-hoc blocking snapshot dies with "invalid snapshot identifier" immediately after connector creation DBZ-7312

    • Specifying a table include list with spaces between elements cause LogMiner queries to miss matches DBZ-7315

    • Debezium heartbeat.action.query does not start before writing to WAL: part 2 DBZ-7316

    • errors.max.retries is not used to stop retrying DBZ-7342

    • Oracle connector is occasionally unable to find SCN DBZ-7345

    • Initial snapshot notifications should use full identifier. DBZ-7347

    • MySqlJdbcSinkDataTypeConverterIT#testBooleanDataTypeMapping fails DBZ-7355

    Other changes

    • Add service loader manifests for all Connect plugins DBZ-7298

    • Update Groovy version to 4.x DBZ-7340

    • Upgrade Antora to 3.1.7 DBZ-7344

    • Upgrade Outbox Extension to Quarkus 3.6.5 DBZ-7352

    \ No newline at end of file diff --git a/releases/2.7/index.html b/releases/2.7/index.html index c64fe35811..41edd7b7ff 100644 --- a/releases/2.7/index.html +++ b/releases/2.7/index.html @@ -1 +1 @@ - Debezium Release Series 2.7

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.7.2.Final

    2024-09-05
    Oracle connector provides additional details about abandoned transactions; Informix connector improves support for DECIMAL datatype; Kafka sink can timeout on delivery failures; SQL Server supports signalling and notifications for multiple tasks; MariaDB is included in Debezium Server distribution

    2.7.1.Final

    2024-08-08
    MariaDB added to Debezium Connect image; `RENAME TABLE` DDL fixes for Oracle/MariaDB; Ad-hoc snapshot fixes; Incorrect closing of transformations in Debezium Engine; Improved mining ranges calculation in Oracle connector; Fixed Vitess epoch calculations; JDBC Sink now handles partition reassignments

    2.7.0.Beta1

    2024-06-06
    Incubating support for Db2 on z/OS; Added support for authentication and encryption for NATS JetStream sink; MariaDB sink supports `upsert` insert mode; Snapshot artifacts are deployed to the new location; JMX Exporter is embedded in Debezium Server image; Metrics configurable via Debezium Operator; Improved handling and propagation of transaction metadata in Vitess connector; Configurable delay between snapshot and streaming transition

    2.7.0.Alpha2

    2024-05-10

    2.7.0.Alpha1

    2024-04-25
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connector provides total ordering information in each event; Performance improvements in Cassandra connector; Helm chart installer for Debezium Operator; Performance improvements in Debezium Engine/Server; Configurable timeout for JDBC queries
    \ No newline at end of file + Debezium Release Series 2.7

    stable

    Tested Versions

    Java 11+
    Kafka Connect 2.x, 3.x
    MySQL Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    2.7.2.Final

    2024-09-05
    Oracle connector provides additional details about abandoned transactions; Informix connector improves support for DECIMAL datatype; Kafka sink can timeout on delivery failures; SQL Server supports signalling and notifications for multiple tasks; MariaDB is included in Debezium Server distribution

    2.7.1.Final

    2024-08-08
    MariaDB added to Debezium Connect image; `RENAME TABLE` DDL fixes for Oracle/MariaDB; Ad-hoc snapshot fixes; Incorrect closing of transformations in Debezium Engine; Improved mining ranges calculation in Oracle connector; Fixed Vitess epoch calculations; JDBC Sink now handles partition reassignments

    2.7.0.Beta1

    2024-06-06
    Incubating support for Db2 on z/OS; Added support for authentication and encryption for NATS JetStream sink; MariaDB sink supports `upsert` insert mode; Snapshot artifacts are deployed to the new location; JMX Exporter is embedded in Debezium Server image; Metrics configurable via Debezium Operator; Improved handling and propagation of transaction metadata in Vitess connector; Configurable delay between snapshot and streaming transition

    2.7.0.Alpha2

    2024-05-10

    2.7.0.Alpha1

    2024-04-25
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connector provides total ordering information in each event; Performance improvements in Cassandra connector; Helm chart installer for Debezium Operator; Performance improvements in Debezium Engine/Server; Configurable timeout for JDBC queries
    \ No newline at end of file diff --git a/releases/2.7/release-notes.html b/releases/2.7/release-notes.html index ebb7e53095..daed5a4525 100644 --- a/releases/2.7/release-notes.html +++ b/releases/2.7/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 2.7

    Release Notes for Debezium 2.7

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.7.4.Final (December 11th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Allow skipping exceptions related to DML parser errors DBZ-8208

    • Support int/bigint arrays in re-select columns post-processors DBZ-8212

    • Log the record key when debezium fails to send the record to Kafka DBZ-8282

    • RowsScanned JMX metric for MongoDB differs from relational connectors DBZ-8359

    Fixes

    • Oracle DDL parsing will fail if the DDL ends with a new line character DBZ-7040

    • Custom convert (all to strings) and SQLServer default '0' type issue DBZ-7045

    • Missing documentation for MongoDb SSL configuration DBZ-7927

    • Conditionalization implemented for single-sourcing MySQL/MariaDB content isn’t working as expected DBZ-8094

    • Error writing data to target database. (Caused by: java.lang.RuntimeException: org.postgresql.util.PSQLException: The column index is out of range: 140, number of columns: 139.) DBZ-8221

    • Debezium Server messages not being sent to Pub/Sub after restart DBZ-8236

    • JDBC Sink truncate event also adds event to updateBufferByTable DBZ-8247

    • Performance Regression in Debezium Server Kafka after DBZ-7575 fix DBZ-8251

    • Error Prone library included in MySQL connector DBZ-8258

    • Debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-8259

    • DDL statement couldn’t be parsed. 'mismatched input 'NOCACHE' expecting {'AS', 'USAGE', ';'} DBZ-8262

    • journal processing loops after journal offset reset DBZ-8265

    • Embedded MySqlConnector "Unable to find minimal snapshot lock mode" since 2.5.4.Final DBZ-8271

    • Reselect Post Processor not working when pkey of type uuid etc. DBZ-8277

    • BinlogStreamingChangeEventSource totalRecordCounter is never updated DBZ-8290

    • Race condition in stop-snapshot signal DBZ-8303

    • ReselectPostProcessor fails when reselecting columns from Oracle DBZ-8304

    • Debezium MySQL DDL parser does not support SECONDARY_ENGINE=RAPID DBZ-8305

    • Oracle DDL failure - subpartition list clause does not support in-memory clause DBZ-8315

    • DDL statement couldn’t be parsed DBZ-8316

    • Oracle connector: archive.log.only.mode stops working after reaching SYSDATE SCN DBZ-8345

    • Object name is not in the list of S3 schema history fields DBZ-8366

    • Upgrade protobuf dependencies to avoid potential vulnerability DBZ-8371

    • ExtractNewRecordState transform: NPE when processing non-envelope records DBZ-8393

    • Oracle LogMiner metric OldestScnAgeInMilliseconds can be negative DBZ-8395

    • ExtractNewDocumentStateTestIT fails randomly DBZ-8397

    • Oracle OBJECT_ID lookup can cause high CPU and latency in Hybrid mining mode DBZ-8399

    • Engine shutdown may get stuck when error is thrown during connector stop DBZ-8414

    • JdbcOffsetBackingStore does not release lock of debezium_offset_storage gracefully DBZ-8423

    • Installation documentation typo on download link DBZ-8429

    • Async engine fails with NPE when transformation returns null DBZ-8434

    • Formatting characters render in descriptions of Oracle log.mining properties DBZ-8450

    Other changes

    • Fix conditionalization in shared MariaDB/MySQL file DBZ-8254

    • Add Oracle FUTC license DBZ-8260

    • Remove Oracle libs from product assembly package DBZ-8261

    • debezium-connector-binlog does not need MariaDB dependency DBZ-8263

    • Provide subset package for Debezium Server DBZ-8264

    • BlockingSnapshotIT streamingMetricsResumeAfterBlockingSnapshot fails after backport DBZ-8267

    • Correct description of the all_tables option for the PG publication.autocreate.mode property DBZ-8268

    • Test docs for productization and fix broken links and rendering errors DBZ-8284

    • Formatting characters render literally in docs DBZ-8293

    • Backport two fixes to binlog client version 0.31.x DBZ-8387

    • Log SCN existence check may throw ORA-01291 if a recent checkpoint occurred DBZ-8389

    Release 2.7.3.Final (September 20th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Avoid 3 second delay in Oracle when one of the RAC nodes is offline DBZ-8177

    • Support MariaDB 11.4.3 DBZ-8226

    • Support BLOB with EMPTY_BLOB() as default DBZ-8248

    Fixes

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    Other changes

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    Release 2.7.2.Final (September 5th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    When using the Kafka sink, it was possible for Debezium Server to block indefinitely if the connection to the Kafka broker was broken. This is no longer the case, as a delivery timeout with a default value was introduced (DBZ-7575).

    When the SQL Server connector was configured to run with multiple tasks, JMX signalling and notifications did not work for all of them. This is fixed, but the JMX naming was updated to reflect the task id (DBZ-8137).
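
    For context, the Kafka producer's own delivery.timeout.ms setting bounds how long a single send may take before it fails, and Debezium Server forwards producer settings through its debezium.sink.kafka.producer.* pass-through options. The sketch below is illustrative only (Debezium Server is normally configured via application.properties, and the broker address is a placeholder); verify the pass-through prefix against the Debezium Server documentation for your version.

    import java.util.Properties;

    public class KafkaSinkTimeoutSketch {

        public static void main(String[] args) {
            // Illustrative only: Debezium Server is normally configured through its
            // application.properties file; the same keys are shown here as Java properties.
            Properties props = new Properties();
            props.setProperty("debezium.sink.type", "kafka");
            // Placeholder broker address.
            props.setProperty("debezium.sink.kafka.producer.bootstrap.servers", "localhost:9092");
            // Standard Kafka producer option forwarded via the pass-through prefix; it bounds
            // how long a single send may take before the producer gives up (value in ms).
            props.setProperty("debezium.sink.kafka.producer.delivery.timeout.ms", "120000");
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }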

    New features

    • Log additional details about abandoned transactions DBZ-8044

    • Support DECIMAL(p) Floating Point DBZ-8114

    • Truncate byte buffer should return a new array DBZ-8189

    Fixes

    • Incremental snapshots don’t work with CloudEvent converter DBZ-7601

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist DBZ-7797

    • Postgres connector - null value processing for "money" type column. DBZ-8027

    • Using snapshot.include.collection.list with Oracle raises NullPointerException DBZ-8032

    • Performance degradation when reconstructing (log.mining.strategy hybrid mode) DBZ-8071

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] DBZ-8125

    • ConvertingFailureIT#shouldFailConversionTimeTypeWithConnectModeWhenFailMode fails randomly DBZ-8128

    • Unpredictable ordering of table rows during insertion causing foreign key error DBZ-8130

    • schema_only crashes ibmi Connector DBZ-8131

    • Support larger database.server.id values DBZ-8134

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile DBZ-8144

    • SchemaOnlyRecoverySnapshotter not registered as an SPI service implementation DBZ-8147

    • When stopping the Oracle RAC node the Debezium server throws an exception - ORA-12514: Cannot connect to database - and retries DBZ-8149

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput DBZ-8150

    • JDBC connector validation fails when using record_value with no primary.key.fields DBZ-8151

    • Taking RAC node offline and back online can lead to thread inconsistency DBZ-8162

    • Postgres JSONB Fields are not supported with Reselect Post Processor DBZ-8168

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine DBZ-8187

    • XStream may fail to attach on retry if previous attempt failed DBZ-8188

    • Exclude Oracle 23 VECSYS tablespace from capture DBZ-8198

    • AbstractProcessorTest uses an incorrect database name when run against Oracle 23 Free edition DBZ-8199

    Other changes

    • Documentation for signals provides incorrect data-collection format for some connectors DBZ-8090

    • Add LogMiner start mining session retry attempt counter to logs DBZ-8143

    • Reduce logging verbosity of XStream DML event data DBZ-8148

    • Add MariaDB connector server distribution DBZ-8186

    • Reduce log verbosity of OpenLogReplicator SCN confirmation DBZ-8201

    Release 2.7.1.Final (August 8th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Update third-party LICENSE with LGPL for MariaDB Connector/J DBZ-8099

    Fixes

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • JdbcSinkTask doesn’t clear offsets on stop DBZ-7946

    • Issue with Hybrid mode and DDL change DBZ-7991

    • Incorrect offset/history property mapping generated DBZ-8007

    • StackOverflow exception on incremental snapshot DBZ-8011

    • JDBC primary.key.fields cannot be empty when insert.mode is set to upsert and primary.key.mode to record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    • "Unexpected input: ." when snapshot incremental empty Database DBZ-8050

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Blocking snapshot can fail due to CommunicationsException DBZ-8058

    • ParsingException (MySQL/MariaDB): rename table syntax DBZ-8066

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Vitess transaction Epoch should not reset to zero when tx ID is missing DBZ-8087

    • After changing the column datatype from int to float, Debezium fails to round it and a null value is emitted for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

    • NotificationIT tests seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

    • Transformations are not closed in embedded engine DBZ-8106

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Docs: connect-log4j.properties instead of log4j.properties DBZ-8117

    • Recalculating mining range upper bounds causes getScnFromTimestamp to fail DBZ-8119

    Other changes

    • Document new MariaDB connector DBZ-7786

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • Add disclaimer that PostProcessors and CustomConverters are Debezium source connectors only DBZ-8031

    • Conditionalize reference to the MySQL default value in description of schema.history.internal.store.only.captured.databases.ddl DBZ-8081

    • Add MariaDB to debezium/connect image DBZ-8088

    • Converters documentation uses incorrect examples DBZ-8104

    • Remove reference to `additional condition` signal parameter from ad hoc snapshots doc DBZ-8107

    • TimescaleDbDatabaseTest.shouldTransformCompressedChunks is failing DBZ-8123

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    Release 2.7.0.Final (June 28th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support collection scoped streaming DBZ-7760

    • Allow stopping DS instance by scaling to zero via annotation DBZ-7953

    • Support heartbeat events in vitess-connector DBZ-7962

    Fixes

    • Unable to use resume token of some documents with composite IDs DBZ-6522

    • Quarkus generates VCS kubernetes annotations pointing to a fork DBZ-7415

    • MongoDB documentation still mentions replica_set connection mode DBZ-7862

    • Clarify documentation for log.mining.archive.destination.name Oracle configuration property DBZ-7939

    • Ad-hoc snapshot raises ORA-00911 when table name uses non-standard characters requiring quotations DBZ-7942

    • Exclude signaling data collection from the snapshot process DBZ-7944

    • JDBC sink time tests fail due to increased precision with SQL Server DBZ-7949

    • Commit is not called after DDLs in JDBC stores DBZ-7951

    • Database case sensitivity can lead to NullPointerException on column lookups DBZ-7956

    • Debezium ibmi connector drops journal entries DBZ-7957

    • Error counter reset in poll() can cause infinite retries DBZ-7964

    • Oracle DDL parser fails using NOMONITORING clause DBZ-7967

    • Invalid default DS image used for nightly/snapshot operator version DBZ-7970

    • Mongo Oversized Document FAQ documentation issue DBZ-7987

    • Cassandra connector does not work with 2.6.1 Server DBZ-7988

    • Testcontainers tests fail on newer versions of Docker DBZ-7986

    Other changes

    • Document the use of the "source" prefix for table name formats DBZ-6618

    • Remove dependency on MySQL driver, add custom CharacterSet Mapper DBZ-7783

    • Rebase website-builder image on Ruby 3.2 DBZ-7916

    • Warn about incompatible usage of read.only property for PostgreSQL DBZ-7947

    • Run JDBC sink tests for any relational connector pull requests DBZ-7948

    • Bump Quarkus to 3.12.0 for Quarkus Outbox Extension DBZ-7961

    • Bump Hibernate dependency to 6.4.8.Final DBZ-7969

    • Deprecated EmbeddedEngine DBZ-7976

    Release 2.7.0.Beta2 (June 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    PostgreSQL 10 and 11 have been end of life for a year or more. Debezium now supports both versions on a best-effort basis only: no testing is done, and issues specific to these two versions will not be proactively fixed by the core team. Community contributions will still be accepted (DBZ-7128).

    New features

    • Allow skipping of table row count in snapshot phase DBZ-7640

    • Add heartbeat action query to SQL Server DBZ-7801

    • Read-only incremental snapshots for PostgreSQL DBZ-7917

    • Support truncation of byte arrays DBZ-7925

    Fixes

    • Oracle property column.truncate.to.length.chars does not support length zero DBZ-7079

    • Debezium Server cannot pass empty string to Kafka config DBZ-7767

    • Unable To Exclude Column Using Configuration DBZ-7813

    • Oracle connector failed to work when the table name contains single quote DBZ-7831

    • Incorrect documentation for CE type DBZ-7926

    • DDL statement couldn’t be parsed DBZ-7931

    • SQL Server default value resolution for TIME data types causes precision loss DBZ-7933

    • Incorrect name of JMX Exporter k8s service DBZ-7934

    • OlrNetworkClient does not disconnect when error occurs DBZ-7935

    • Multiple ARRAY types in single table causing error DBZ-7938

    Other changes

    • Create REST extension tests and infrastructure DBZ-7785

    • Introduce ROW_ID for OpenLogReplicator changes DBZ-7823

    • Test SqlServerConnectorIT#shouldStopRetriableRestartsAtConfiguredMaximumDuringStreaming is failing DBZ-7936

    • Add exception details when engine fails to commit offset DBZ-7937

    Release 2.7.0.Beta1 (June 6th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium snapshots were originally deployed to the legacy oss.sonatype.org Sonatype infrastructure. This has changed, and they are now deployed to the new s01.oss.sonatype.org infrastructure (DBZ-7641).

    The Oracle NUMERIC type with zero scale ignored the decimal.handling.mode setting. With this version the type is properly propagated as the configured type. This can cause an issue when upgrading deployments with strict schema registry compatibility rules (DBZ-7882).
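
    The event representation of such columns is governed by the decimal.handling.mode connector option (precise, double, or string). The sketch below shows where the option sits in an Oracle connector configuration, written as Java properties with placeholder connection values and with schema history and other required options omitted.

    import java.util.Properties;

    public class OracleDecimalHandlingSketch {

        public static void main(String[] args) {
            // Placeholder connection values; schema history and other required options omitted.
            Properties config = new Properties();
            config.setProperty("connector.class", "io.debezium.connector.oracle.OracleConnector");
            config.setProperty("topic.prefix", "server1");
            config.setProperty("database.hostname", "oracle.example.com");
            config.setProperty("database.port", "1521");
            config.setProperty("database.user", "c##dbzuser");
            config.setProperty("database.password", "dbz");
            config.setProperty("database.dbname", "ORCLCDB");
            // precise (the default) emits exact decimal values; double and string trade
            // precision or type fidelity for simpler downstream handling. After DBZ-7882,
            // zero-scale NUMERIC columns honour this setting as well.
            config.setProperty("decimal.handling.mode", "string");
            config.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }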

    New features

    • debezium-connector-db2: z/OS integration DBZ-4812

    • Ensure vgtid remains local to shards streamed by task DBZ-6721

    • Decompose provide.transaction.metadata into components DBZ-6722

    • Handle Enum as String or Int DBZ-7792

    • MariaDB target should support 'upsert' for insert.mode DBZ-7874

    • Add support for user/password authentication in Nats Jetstream sink adapter DBZ-7876

    • Allow customizing ObjectMapper in JsonSerde DBZ-7887

    • Add configurable delay after successful snapshot before starting streaming DBZ-7902

    • Enhancing the threads utility class for broader use DBZ-7906

    • Include Prometheus JMX exporter in Debezium Server distribution DBZ-7913

    • Add support for TLS auth for NATS JetStream sink DBZ-7922

    Fixes

    • Debezium 1.9.2 cannot capture a field of the Postgres date type DBZ-5182

    • Rewrite batch statement not supported for jdbc debezium sink DBZ-7845

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Reduce enum array allocation DBZ-7859

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Bump Java in Debezium Server images DBZ-7861

    • Default value of error retries not interpreted correctly DBZ-7870

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • Improve offset and history storage configuration DBZ-7884

    • Oracle Debezium Connector cannot startup due to failing incremental snapshot DBZ-7886

    • Multiple 'completed reading from a capture instance' notifications DBZ-7889

    • Debezium can’t handle columns with # in their name DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    • Debezium server unable to shutdown on pubsub error DBZ-7904

    • Handle gtid without range only single position DBZ-7905

    • Oracle connector cannot parse SUBPARTITION when altering table DBZ-7908

    • Make column exclude use keyspace not shard DBZ-7910

    • The explanation in the documentation is insufficient - metric DBZ-7912

    Other changes

    • Too many logs after Debezium update DBZ-7871

    • Test Geometry and Geography columns during Initial Snapshot DBZ-7878

    • Remove incubating note from post-processors index.adoc file DBZ-7890

    Release 2.7.0.Alpha2 (May 10th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add ROW_ID as part of source information block for LogMiner sources DBZ-4332

    • Support for ARRAY data types for postgres DBZ-7752

    • Enhance documentation about using tags to customize connector snapshot/streaming MBean names DBZ-7800

    • Allow specifying the log mining flush table with an optional schema DBZ-7819

    • Added nats JWT/seed authentication config options DBZ-7829

    • Update Debezium container images to use Fedora 38 DBZ-7832

    • Debezium Oracle connector needs to support IN clause in the LogMiner query for more than 1000 tables, as it creates a performance issue DBZ-7847

    Fixes

    • Debezium User Guide 2.5.4: Grammatical error DBZ-7803

    • io.debezium.text.ParsingException: SQL Contains Partition DBZ-7805

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • Abnormal Behavior in Debezium Monitoring Example - mysql connector DBZ-7826

    • DEBEZIUM_VERSION is wrongly set to 2.6.0.Alpha1 DBZ-7827

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

    • Debezium JDBC Sink does not handle order correctly DBZ-7830

    • Fix typo in documentation/modules doc DBZ-7844

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    Other changes

    • Log exception details early in case MySQL keep-alive causes deadlock on shutdown DBZ-7570

    • Extend mongodb system tests with ssl option DBZ-7605

    • Refactor oracle connector test job DBZ-7807

    • Fix anchor ID collisions that prevent downstream documentation from building DBZ-7815

    • Add c3p0 timeout configuration example to JDBC sink DBZ-7822

    • Move undocumented option to internal DBZ-7833

    • Increase wait for shouldGracefullySkipObjectBasedTables on XStream DBZ-7839

    • Bump Debezium Server to Quarkus 3.8.3 DBZ-7841

    • Bump Outbox Extension to Quarkus 3.10.0 DBZ-7842

    • In the Cassandra documentation, there is a typo: it should be "disable", not "Dusable" DBZ-7851

    Release 2.7.0.Alpha1 (April 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    JDBC queries could hang indefinitely in case of a database communication error. A configurable timeout was introduced to prevent the issue (DBZ-7616).

    The SQL Server connector by default processed all transactions available during each data poll. This could lead to out-of-memory errors for databases with heavy traffic. The default value was changed to limit the number of transactions per poll to 500 (DBZ-7750).
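
    The timeout guards against exactly the situation plain JDBC exhibits when no statement timeout is set. The sketch below illustrates that underlying mechanism with the standard java.sql API (connection URL, credentials, and table name are placeholders); it is not Debezium configuration, whose exact option name should be taken from the connector documentation.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLTimeoutException;
    import java.sql.Statement;

    public class QueryTimeoutSketch {

        public static void main(String[] args) throws Exception {
            // Placeholder URL, credentials, and table; any JDBC driver on the classpath works.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "user", "secret");
                 Statement stmt = conn.createStatement()) {
                // Without a timeout this call could hang indefinitely if the database stops
                // responding; with it, the driver aborts the query after the given seconds.
                stmt.setQueryTimeout(60);
                try (ResultSet rs = stmt.executeQuery("SELECT count(*) FROM customers")) {
                    while (rs.next()) {
                        System.out.println(rs.getLong(1));
                    }
                }
            }
            catch (SQLTimeoutException e) {
                System.err.println("Query exceeded the configured timeout: " + e.getMessage());
            }
        }
    }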

    New features

    • Support helm chart installation of debezium-operator DBZ-7116

    • Adding additional-conditions into Incremental Snapshot to MongoDB DBZ-7138

    • Document MongoDB connector inactivity pause and its performance implications DBZ-7147

    • Move MariaDB connector from MySQL to its own separate connector DBZ-7693

    • Mongodb Delete events should have _id in the payload DBZ-7695

    • Provide option to encode ordering metadata in each record DBZ-7698

    • Manage escaping when captured tables are determined for snapshot DBZ-7718

    • Performance improvement in KafkaRecordEmitter class DBZ-7722

    • Introduce RawToString transform for converting GUIDs stored in Oracle RAW(16) columns to Guid string DBZ-7753

    • Improve NLS character set support by including orai18n dependency DBZ-7761

    • Vitess Connector should have parity with MySQL’s time.precision.mode DBZ-7773

    • Document potential null values in the after field for lookup full update type DBZ-7789

    • Fix invalid date/timestamp check & logging level DBZ-7811

    Fixes

    • Builtin database name filter is incorrectly applied only to collections instead of databases in snapshot DBZ-7485

    • After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • The test is failing because wrong topics are used DBZ-7715

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • Handle instability in JDBC connector system tests DBZ-7726

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Fix MongoDB unwrap SMT test DBZ-7731

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Align unwrap tests to respect AT LEAST ONCE delivery DBZ-7746

    • Exclude reload4j from Kafka connect dependencies in system testsuite DBZ-7748

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • old class reference in ibmi-connector services DBZ-7795

    • Documentation for Debezium Scripting mentions wrong property DBZ-7798

    Other changes

    • Update documentation for embedded engine DBZ-7632

    • Implement basic JMH perf. tests for async engine DBZ-7633

    • Upgrade Debezium Quarkus Outbox to Quarkus 3.9.2 DBZ-7663

    • Move LogPositionValidator outside the JdbcConnection DBZ-7717

    • Fix mongodb image in system tests DBZ-7739

    • Refactor exporting to CloudEvents DBZ-7755

    • Use thread cap only for default value DBZ-7763

    • Evaluate cached thread pool as the default option for async embedded engine DBZ-7764

    • Create JMH benchmark for engine record processing DBZ-7776

    • Improve processing speed of async engine processors which use List#get() DBZ-7777

    • Disable renovate in debezium-ui DBZ-7814

    \ No newline at end of file + Release Notes for Debezium 2.7

    Release Notes for Debezium 2.7

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 2.7.4.Final (December 11th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Allow skipping exceptions related to DML parser errors DBZ-8208

    • Support int/bigint arrays in reselect columns post processors DBZ-8212

    • Log the record key when debezium fails to send the record to Kafka DBZ-8282

    • RowsScanned JMX metric for MongoDB differs from relational connectors DBZ-8359

    Fixes

    • Oracle DDL parsing will fail if the DDL ends with a new line character DBZ-7040

    • Custom convert (all to strings) and SQLServer default '0' type issue DBZ-7045

    • Missing documentation for MongoDb SSL configuration DBZ-7927

    • Conditionalization implemented for single-sourcing MySQL/MariaDB content isn’t working as expected DBZ-8094

    • Error writing data to target database. (Caused by: java.lang.RuntimeException: org.postgresql.util.PSQLException: The column index is out of range: 140, number of columns: 139.) DBZ-8221

    • Debezium Server messages not being sent to Pub/Sub after restart DBZ-8236

    • JDBC Sink truncate event also adds event to updateBufferByTable DBZ-8247

    • Performance Regression in Debezium Server Kafka after DBZ-7575 fix DBZ-8251

    • Error Prone library included in MySQL connector DBZ-8258

    • Debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-8259

    • DDL statement couldn’t be parsed. 'mismatched input 'NOCACHE' expecting {'AS', 'USAGE', ';'} DBZ-8262

    • journal processing loops after journal offset reset DBZ-8265

    • Embedded MySqlConnector "Unable to find minimal snapshot lock mode" since 2.5.4.Final DBZ-8271

    • Reselect Post Processor not working when pkey of type uuid etc. DBZ-8277

    • BinlogStreamingChangeEventSource totalRecordCounter is never updated DBZ-8290

    • Race condition in stop-snapshot signal DBZ-8303

    • ReselectPostProcessor fails when reselecting columns from Oracle DBZ-8304

    • Debezium MySQL DDL parser: SECONDARY_ENGINE=RAPID is not supported DBZ-8305

    • Oracle DDL failure - subpartition list clause does not support in-memory clause DBZ-8315

    • DDL statement couldn’t be parsed DBZ-8316

    • Oracle connector: archive.log.only.mode stops working after reaching SYSDATE SCN DBZ-8345

    • Object name is not in the list of S3 schema history fields DBZ-8366

    • Upgrade protobuf dependencies to avoid potential vulnerability DBZ-8371

    • ExtractNewRecordState transform: NPE when processing non-envelope records DBZ-8393

    • Oracle LogMiner metric OldestScnAgeInMilliseconds can be negative DBZ-8395

    • ExtractNewDocumentStateTestIT fails randomly DBZ-8397

    • Oracle OBJECT_ID lookup can cause high CPU and latency in Hybrid mining mode DBZ-8399

    • Engine shutdown may get stuck when error is thrown during connector stop DBZ-8414

    • JdbcOffsetBackingStore does not release lock of debezium_offset_storage gracefully DBZ-8423

    • Installation documentation typo on download link DBZ-8429

    • Async engine fails with NPE when transformation returns null DBZ-8434

    • Formatting characters render in descriptions of Oracle log.mining properties DBZ-8450

    Other changes

    • Fix conditionalization in shared MariaDB/MySQL file DBZ-8254

    • Add Oracle FUTC license DBZ-8260

    • Remove Oracle libs from product assembly package DBZ-8261

    • debezium-connector-binlog does not need MariaDB dependency DBZ-8263

    • Provide subset package for Debezium Server DBZ-8264

    • BlockingSnapshotIT streamingMetricsResumeAfterBlockingSnapshot fails after backport DBZ-8267

    • Correct description of the all_tables option for the PG publication.autocreate.mode property DBZ-8268

    • Test docs for productization and fix broken links and rendering errors DBZ-8284

    • Formatting characters render literally in docs DBZ-8293

    • Backport two fixes to binlog client version 0.31.x DBZ-8387

    • Log SCN existence check may throw ORA-01291 if a recent checkpoint occurred DBZ-8389

    Release 2.7.3.Final (September 20th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Avoid 3 second delay in Oracle when one of the RAC nodes is offline DBZ-8177

    • Support MariaDB 11.4.3 DBZ-8226

    • Support BLOB with EMPTY_BLOB() as default DBZ-8248

    Fixes

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    Other changes

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    Release 2.7.2.Final (September 5th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    When using the Kafka sink, it was possible for Debezium Server to block indefinitely if the connection to the Kafka broker was broken. This is no longer the case, as a delivery timeout with a default value was introduced (DBZ-7575).

    When the SQL Server connector was configured to run with multiple tasks, JMX signalling and notifications did not work for all of them. This is fixed, but the JMX naming was updated to reflect the task id (DBZ-8137).

    New features

    • Log additional details about abandoned transactions DBZ-8044

    • Support DECIMAL(p) Floating Point DBZ-8114

    • Truncate byte buffer should return a new array DBZ-8189

    Fixes

    • Incremental snapshots don’t work with CloudEvent converter DBZ-7601

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist DBZ-7797

    • Postgres connector - null value processing for "money" type column. DBZ-8027

    • Using snapshot.include.collection.list with Oracle raises NullPointerException DBZ-8032

    • Performance degradation when reconstructing (log.mining.strategy hybrid mode) DBZ-8071

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] DBZ-8125

    • ConvertingFailureIT#shouldFailConversionTimeTypeWithConnectModeWhenFailMode fails randomly DBZ-8128

    • Unpredictable ordering of table rows during insertion causing foreign key error DBZ-8130

    • schema_only crashes ibmi Connector DBZ-8131

    • Support larger database.server.id values DBZ-8134

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile DBZ-8144

    • SchemaOnlyRecoverySnapshotter not registered as an SPI service implementation DBZ-8147

    • When stopping the Oracle RAC node the Debezium server throws an exception - ORA-12514: Cannot connect to database - and retries DBZ-8149

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput DBZ-8150

    • JDBC connector validation fails when using record_value with no primary.key.fields DBZ-8151

    • Taking RAC node offline and back online can lead to thread inconsistency DBZ-8162

    • Postgres JSONB Fields are not supported with Reselect Post Processor DBZ-8168

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine DBZ-8187

    • XStream may fail to attach on retry if previous attempt failed DBZ-8188

    • Exclude Oracle 23 VECSYS tablespace from capture DBZ-8198

    • AbstractProcessorTest uses an incorrect database name when run against Oracle 23 Free edition DBZ-8199

    Other changes

    • Documentation for signals provides incorrect data-collection format for some connectors DBZ-8090

    • Add LogMiner start mining session retry attempt counter to logs DBZ-8143

    • Reduce logging verbosity of XStream DML event data DBZ-8148

    • Add MariaDB connector server distribution DBZ-8186

    • Reduce log verbosity of OpenLogReplicator SCN confirmation DBZ-8201

    Release 2.7.1.Final (August 8th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Update third-party LICENSE with LGPL for MariaDB Connector/J DBZ-8099

    Fixes

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • JdbcSinkTask doesn’t clear offsets on stop DBZ-7946

    • Issue with Hybrid mode and DDL change DBZ-7991

    • Incorrect offset/history property mapping generated DBZ-8007

    • StackOverflow exception on incremental snapshot DBZ-8011

    • JDBC primary.key.fields cannot be empty when insert.mode is set to upsert and primary.key.mode to record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    • "Unexpected input: ." when snapshot incremental empty Database DBZ-8050

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Blocking snapshot can fail due to CommunicationsException DBZ-8058

    • ParsingException (MySQL/MariaDB): rename table syntax DBZ-8066

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Vitess transaction Epoch should not reset to zero when tx ID is missing DBZ-8087

    • After changing the column datatype from int to float, Debezium fails to round it and a null value is emitted for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

    • NotificationIT tests seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

    • Transformations are not closed in embedded engine DBZ-8106

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Docs: connect-log4j.properties instead of log4j.properties DBZ-8117

    • Recalculating mining range upper bounds causes getScnFromTimestamp to fail DBZ-8119

    Other changes

    • Document new MariaDB connector DBZ-7786

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • Add disclaimer that PostProcessors and CustomConverters are Debezium source connectors only DBZ-8031

    • Conditionalize reference to the MySQL default value in description of schema.history.internal.store.only.captured.databases.ddl DBZ-8081

    • Add MariaDB to debezium/connect image DBZ-8088

    • Converters documentation uses incorrect examples DBZ-8104

    • Remove reference to `additional condition` signal parameter from ad hoc snapshots doc DBZ-8107

    • TimescaleDbDatabaseTest.shouldTransformCompressedChunks is failing DBZ-8123

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    Release 2.7.0.Final (June 28th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Support collection scoped streaming DBZ-7760

    • Allow stopping DS instance by scaling to zero via annotation DBZ-7953

    • Support heartbeat events in vitess-connector DBZ-7962

    Fixes

    • Unable to use resume token of some documents with composite IDs DBZ-6522

    • Quarkus generates VCS kubernetes annotations pointing to a fork DBZ-7415

    • MongoDB documentation still mentions replica_set connection mode DBZ-7862

    • Clarify documentation for log.mining.archive.destination.name Oracle configuration property DBZ-7939

    • Ad-hoc snapshot raises ORA-00911 when table name uses non-standard characters requiring quotations DBZ-7942

    • Exclude signaling data collection from the snapshot process DBZ-7944

    • JDBC sink time tests fail due to increased precision with SQL Server DBZ-7949

    • Commit is not called after DDLs in JDBC stores DBZ-7951

    • Database case sensitivity can lead to NullPointerException on column lookups DBZ-7956

    • Debezium ibmi connector drops journal entries DBZ-7957

    • Error counter reset in poll() can cause infinite retries DBZ-7964

    • Oracle DDL parser fails using NOMONITORING clause DBZ-7967

    • Invalid default DS image used for nightly/snapshot operator version DBZ-7970

    • Mongo Oversized Document FAQ documentation issue DBZ-7987

    • Cassandra connector does not work with 2.6.1 Server DBZ-7988

    • Testcontainers tests fail on newer versions of Docker DBZ-7986

    Other changes

    • Document the use of the "source" prefix for table name formats DBZ-6618

    • Remove dependency on MySQL driver, add custom CharacterSet Mapper DBZ-7783

    • Rebase website-builder image on Ruby 3.2 DBZ-7916

    • Warn about incompatible usage of read.only property for PostgreSQL DBZ-7947

    • Run JDBC sink tests for any relational connector pull requests DBZ-7948

    • Bump Quarkus to 3.12.0 for Quarkus Outbox Extension DBZ-7961

    • Bump Hibernate dependency to 6.4.8.Final DBZ-7969

    • Deprecated EmbeddedEngine DBZ-7976

    Release 2.7.0.Beta2 (June 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Beta2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Beta2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Beta2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    PostgreSQL 10 and 11 have been end of life for a year or more. Debezium now supports both versions on a best-effort basis only: no testing is done, and issues specific to these two versions will not be proactively fixed by the core team. Community contributions will still be accepted (DBZ-7128).

    New features

    • Allow skipping of table row count in snapshot phase DBZ-7640

    • Add heartbeat action query to SQL Server DBZ-7801

    • Read-only incremental snapshots for PostgreSQL DBZ-7917

    • Support truncation of byte arrays DBZ-7925

    Fixes

    • Oracle property column.truncate.to.length.chars does not support length zero DBZ-7079

    • Debezium Server cannot pass empty string to Kafka config DBZ-7767

    • Unable To Exclude Column Using Configuration DBZ-7813

    • Oracle connector failed to work when the table name contains single quote DBZ-7831

    • Incorrect documentation for CE type DBZ-7926

    • DDL statement couldn’t be parsed DBZ-7931

    • SQL Server default value resolution for TIME data types causes precision loss DBZ-7933

    • Incorrect name of JMX Exporter k8s service DBZ-7934

    • OlrNetworkClient does not disconnect when error occurs DBZ-7935

    • Multiple ARRAY types in single table causing error DBZ-7938

    Other changes

    • Create REST extension tests and infrastructure DBZ-7785

    • Introduce ROW_ID for OpenLogReplicator changes DBZ-7823

    • Test SqlServerConnectorIT#shouldStopRetriableRestartsAtConfiguredMaximumDuringStreaming is failing DBZ-7936

    • Add exception details when engine fails to commit offset DBZ-7937

    Release 2.7.0.Beta1 (June 6th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium snapshots were originally deployed to the legacy oss.sonatype.org Sonatype infrastructure. This has changed, and they are now deployed to the new s01.oss.sonatype.org infrastructure (DBZ-7641).

    The Oracle NUMERIC type with zero scale ignored the decimal.handling.mode setting. With this version the type is properly propagated as the configured type. This can cause an issue when upgrading deployments with strict schema registry compatibility rules (DBZ-7882).

    New features

    • debezium-connector-db2: z/OS integration DBZ-4812

    • Ensure vgtid remains local to shards streamed by task DBZ-6721

    • Decompose provide.transaction.metadata into components DBZ-6722

    • Handle Enum as String or Int DBZ-7792

    • MariaDB target should support 'upsert' for insert.mode DBZ-7874

    • Add support for user/password authentication in Nats Jetstream sink adapter DBZ-7876

    • Allow customizing ObjectMapper in JsonSerde DBZ-7887

    • Add configurable delay after successful snapshot before starting streaming DBZ-7902

    • Enhancing the threads utility class for broader use DBZ-7906

    • Include Prometheus JMX exporter in Debezium Server distribution DBZ-7913

    • Add support for TLS auth for NATS JetStream sink DBZ-7922

    Fixes

    • Debezium 1.9.2 cannot capture a field of the Postgres date type DBZ-5182

    • Rewrite batch statement not supported for jdbc debezium sink DBZ-7845

    • Debezium MySQL Snapshot Connector Fails DBZ-7858

    • Reduce enum array allocation DBZ-7859

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Bump Java in Debezium Server images DBZ-7861

    • Default value of error retries not interpreted correctly DBZ-7870

    • Avro schema compatibility issues when upgrading from Oracle Debezium 2.5.3.Final to 2.6.1.Final DBZ-7880

    • Improve offset and history storage configuration DBZ-7884

    • Oracle Debezium Connector cannot startup due to failing incremental snapshot DBZ-7886

    • Multiple 'completed reading from a capture instance' notifications DBZ-7889

    • Debezium can’t handle columns with # in their name DBZ-7893

    • Oracle interval default values are not properly parsed DBZ-7898

    • Debezium server unable to shutdown on pubsub error DBZ-7904

    • Handle gtid without range only single position DBZ-7905

    • Oracle connector cannot parse SUBPARTITION when altering table DBZ-7908

    • Make column exclude use keyspace not shard DBZ-7910

    • The explanation in the documentation is insufficient - metric DBZ-7912

    Other changes

    • Too many logs after Debezium update DBZ-7871

    • Test Geometry and Geography columns during Initial Snapshot DBZ-7878

    • Remove incubating note from post-processors index.adoc file DBZ-7890

    Release 2.7.0.Alpha2 (May 10th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add ROW_ID as part of source information block for LogMiner sources DBZ-4332

    • Support for ARRAY data types for postgres DBZ-7752

    • Enhance documentation about using tags to customize connector snapshot/streaming MBean names DBZ-7800

    • Allow specifying the log mining flush table with an optional schema DBZ-7819

    • Added nats JWT/seed authentication config options DBZ-7829

    • Update Debezium container images to use Fedora 38 DBZ-7832

• Debezium Oracle connector needs to support IN clause for log miner query for more than 1000 tables as it creates a performance issue DBZ-7847

    Fixes

    • Debezium User Guide 2.5.4: Grammatical error DBZ-7803

• io.debezium.text.ParsingException: SQL Contains Partition DBZ-7805

    • Ad-hoc blocking snapshot not working through file channeling without inserting a row in the database. DBZ-7806

    • Postgres: Potential data loss on connector restart DBZ-7816

    • Abnormal Behavior in Debezium Monitoring Example - mysql connector DBZ-7826

    • DEBEZIUM_VERSION is wrongly set to 2.6.0.Alpha1 DBZ-7827

    • Sql Server incorrectly applying quoted snapshot statement overrides DBZ-7828

• Debezium JDBC Sink does not handle order correctly DBZ-7830

    • Fix typo in documentation/modules doc DBZ-7844

    • Support Oracle DDL Alter Audit Policy DBZ-7864

    • Support Oracle DDL Create Audit Policy DBZ-7865

    Other changes

    • Log exception details early in case MySQL keep-alive causes deadlock on shutdown DBZ-7570

    • Extend mongodb system tests with ssl option DBZ-7605

    • Refactor oracle connector test job DBZ-7807

    • Fix anchor ID collisions that prevent downstream documentation from building DBZ-7815

    • Add c3p0 timeout configuration example to JDBC sink DBZ-7822

    • Move undocumented option to internal DBZ-7833

    • Increase wait for shouldGracefullySkipObjectBasedTables on XStream DBZ-7839

    • Bump Debezium Server to Quarkus 3.8.3 DBZ-7841

    • Bump Outbox Extension to Quarkus 3.10.0 DBZ-7842

• In the Cassandra documentation there is a typo: "Dusable" should have been "disable" DBZ-7851

    Release 2.7.0.Alpha1 (April 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 2.7.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 2.7.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 2.7.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

JDBC queries could hang indefinitely in case of database communication errors. A configurable timeout was introduced to prevent the issue (DBZ-7616).

The SQL Server connector by default processed all transactions available during each data poll. This could lead to out-of-memory errors for databases with high traffic. The default value was changed to limit the number of transactions per poll to 500 (DBZ-7750).
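Both defaults can be tuned if they do not fit a given deployment. The sketch below assumes the property names database.query.timeout.ms (the new JDBC query timeout) and max.iteration.transactions (the SQL Server per-poll transaction cap); verify both names against the connector documentation for your version.

```java
import java.util.Properties;

public class SqlServerTuning {
    // Hedged sketch: the two property names are assumptions to be checked against the docs.
    public static Properties connectorConfig() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
        props.setProperty("topic.prefix", "server1"); // illustrative placeholder
        // Assumed name of the configurable JDBC query timeout introduced for DBZ-7616.
        props.setProperty("database.query.timeout.ms", "600000");
        // Assumed cap on transactions processed per poll (DBZ-7750); the new default is 500.
        props.setProperty("max.iteration.transactions", "500");
        return props;
    }
}
```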

    New features

    • Support helm chart installation of debezium-operator DBZ-7116

    • Adding additional-conditions into Incremental Snapshot to MongoDB DBZ-7138

• Document MongoDB connector inactivity pause and its performance implications DBZ-7147

    • Move MariaDB connector from MySQL to its own separate connector DBZ-7693

    • Mongodb Delete events should have _id in the payload DBZ-7695

    • Provide option to encode ordering metadata in each record DBZ-7698

• Manage escaping when captured tables are determined for snapshot DBZ-7718

    • Performance improve in KafkaRecordEmitter class DBZ-7722

    • Introduce RawToString transform for converting GUIDs stored in Oracle RAW(16) columns to Guid string DBZ-7753

    • Improve NLS character set support by including orai18n dependency DBZ-7761

    • Vitess Connector should have parity with MySQL’s time.precision.mode DBZ-7773

    • Document potential null values in the after field for lookup full update type DBZ-7789

    • Fix invalid date/timestamp check & logging level DBZ-7811

    Fixes

    • Builtin database name filter is incorrectly applied only to collections instead of databases in snapshot DBZ-7485

• After the initial deployment of Debezium, if a new table is added to MSSQL, its schema is not captured DBZ-7697

    • The test is failing because wrong topics are used DBZ-7715

    • Incremental Snapshot: read duplicate data when database has 1000 tables DBZ-7716

    • Handle instability in JDBC connector system tests DBZ-7726

    • SQLServerConnectorIT.shouldNotStreamWhenUsingSnapshotModeInitialOnly check an old log message DBZ-7729

    • Fix MongoDB unwrap SMT test DBZ-7731

    • Snapshot fails with an error of invalid lock DBZ-7732

    • Column CON_ID queried on V$THREAD is not available in Oracle 11 DBZ-7737

    • Redis NOAUTH Authentication Error when DB index is specified DBZ-7740

    • Getting oldest transaction in Oracle buffer can cause NoSuchElementException with Infinispan DBZ-7741

    • The MySQL Debezium connector is not doing the snapshot after the reset. DBZ-7743

    • MongoDb connector doesn’t work with Load Balanced cluster DBZ-7744

    • Align unwrap tests to respect AT LEAST ONCE delivery DBZ-7746

    • Exclude reload4j from Kafka connect dependencies in system testsuite DBZ-7748

    • Pod Security Context not set from template DBZ-7749

    • Apply MySQL binlog client version 0.29.1 - bugfix: read long value when deserializing gtid transaction’s length DBZ-7757

    • Change streaming exceptions are swallowed by BufferedChangeStreamCursor DBZ-7759

    • Sql-Server connector fails after initial start / processed record on subsequent starts DBZ-7765

    • Valid resume token is considered invalid which leads to new snapshot with some snapshot modes DBZ-7770

    • NO_DATA snapshot mode validation throw DebeziumException on restarts if snapshot is not completed DBZ-7780

    • DDL statement couldn’t be parsed DBZ-7788

    • old class reference in ibmi-connector services DBZ-7795

    • Documentation for Debezium Scripting mentions wrong property DBZ-7798

    Other changes

• Update documentation for embedded engine DBZ-7632

• Implement basic JMH perf. tests for async engine DBZ-7633

    • Upgrade Debezium Quarkus Outbox to Quarkus 3.9.2 DBZ-7663

    • Move LogPositionValidator outside the JdbcConnection DBZ-7717

    • Fix mongodb image in system tests DBZ-7739

    • Refactor exporting to CloudEvents DBZ-7755

• Use thread cap only for default value DBZ-7763

    • Evaluate cached thread pool as the default option for async embedded engine DBZ-7764

    • Create JMH benchmark for engine record processing DBZ-7776

    • Improve processing speed of async engine processors which use List#get() DBZ-7777

    • Disable renovate in debezium-ui DBZ-7814

    \ No newline at end of file diff --git a/releases/3.0/index.html b/releases/3.0/index.html index dd45e79d91..fa0f011c47 100644 --- a/releases/3.0/index.html +++ b/releases/3.0/index.html @@ -1 +1 @@ - Debezium Release Series 3.0

    latest stable

    Tested Versions

    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    Kafka Connect 3.1 and later
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    3.0.5.Final

    2024-12-18
Snapshot field enumeration in source block provides additional cases; Signals in-progress are not reprocessed upon connector restart; SQL Server is tested with transparent data encryption; Added support for PostgreSQL 17 failover slots; JDBC offset/history can be configured in Operator CRD; Support for ad-hoc snapshot for tables whose schema was not captured

    3.0.2.Final

    2024-11-15
Vitess connector can send schema change events; Debezium Operator can enable Debezium Server REST API; Oracle Connector supports NLS time format; Offset storage in k8s config map; Improved reliability of read-only incremental snapshots; RabbitMQ sink no longer skips data in case of failure

    3.0.0.CR2

    2024-09-30
    Configurable isolation level for PostgreSQL snapshot; Support for `EXTENDED` strings in Oracle LogMiner connector; Support for `EMPTY_BLOB()` default value in Oracle connector; Faster OOTB configuration for Debezium Engine; Predictable selection of Debezium Engine implementation; Upgrade of Oracle driver to ojdbc11; Debezium containers are based on Fedora 40

    3.0.0.Beta1

    2024-08-22
Ehcache based event buffer for Oracle connector; Advanced event aggregator metrics; Support for Decimal type in Informix; SMT for processing of PostgreSQL logical decoding messages; Support for pgvector datatypes; Improved parallelism and shard handling in Vitess connector; RabbitMQ native stream support for individual streams; Upgrade to Apicurio 2.6.2.Final; Support for MySQL 8.3
    \ No newline at end of file + Debezium Release Series 3.0

    latest stable

    Tested Versions

    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    Kafka Connect 3.1 and later
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Db2 for IBM i Database: 7.4
    Driver: 11.1

    Not compatible with your requirements? Have a look at the other series.
    See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Documentation

    Documentation for this specific series can be accessed below:

    What's new

    For more details on breaking changes, bugfixes, and new features see the release notes.

    Installation

    If you need details on how to install Debezium, we've documented some of the most common ways in the installation guide.

    Releases in this series

    The following are all the releases/bugfixes within this series.

    3.0.5.Final

    2024-12-18
Snapshot field enumeration in source block provides additional cases; Signals in-progress are not reprocessed upon connector restart; SQL Server is tested with transparent data encryption; Added support for PostgreSQL 17 failover slots; JDBC offset/history can be configured in Operator CRD; Support for ad-hoc snapshot for tables whose schema was not captured

    3.0.2.Final

    2024-11-15
Vitess connector can send schema change events; Debezium Operator can enable Debezium Server REST API; Oracle Connector supports NLS time format; Offset storage in k8s config map; Improved reliability of read-only incremental snapshots; RabbitMQ sink no longer skips data in case of failure

    3.0.0.CR2

    2024-09-30
    Configurable isolation level for PostgreSQL snapshot; Support for `EXTENDED` strings in Oracle LogMiner connector; Support for `EMPTY_BLOB()` default value in Oracle connector; Faster OOTB configuration for Debezium Engine; Predictable selection of Debezium Engine implementation; Upgrade of Oracle driver to ojdbc11; Debezium containers are based on Fedora 40

    3.0.0.Beta1

    2024-08-22
Ehcache based event buffer for Oracle connector; Advanced event aggregator metrics; Support for Decimal type in Informix; SMT for processing of PostgreSQL logical decoding messages; Support for pgvector datatypes; Improved parallelism and shard handling in Vitess connector; RabbitMQ native stream support for individual streams; Upgrade to Apicurio 2.6.2.Final; Support for MySQL 8.3
    \ No newline at end of file diff --git a/releases/3.0/release-notes.html b/releases/3.0/release-notes.html index c8ea5ea0bf..4008ac9767 100644 --- a/releases/3.0/release-notes.html +++ b/releases/3.0/release-notes.html @@ -1 +1 @@ - Release Notes for Debezium 3.0

    Release Notes for Debezium 3.0

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 3.0.5.Final (December 18th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

Debezium was reprocessing signals upon connector restarts. This could lead to unpredictable behaviour and have unintended side effects. This feature was therefore removed; if a connector stops, any pending signal must be re-sent (DBZ-7856).
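In practice this means that a signal still pending when the connector stopped has to be written again once the connector is back up. Below is a minimal sketch that re-inserts an execute-snapshot signal through the signaling table over plain JDBC; the JDBC URL, the credentials, the table name debezium_signal, and the captured table inventory.orders are all illustrative placeholders.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.UUID;

public class ResendSignal {
    public static void main(String[] args) throws Exception {
        // Illustrative connection to the source database that the connector captures.
        try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "debezium", "dbz");
             PreparedStatement stmt = conn.prepareStatement(
                    "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            stmt.setString(1, UUID.randomUUID().toString());          // arbitrary unique signal id
            stmt.setString(2, "execute-snapshot");                    // signal type
            stmt.setString(3,                                         // JSON payload
                    "{\"data-collections\": [\"inventory.orders\"], \"type\": \"incremental\"}");
            stmt.executeUpdate();
        }
    }
}
```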

    The snapshot enumeration in the source info block schema was extended to cover all possible cases (DBZ-8496).

    New features

    • List all examples in root README.md of Debezium’s Example Repo DBZ-2535

    • Test the MS SQL Server Plugin with Transparent data encryption (TDE) DBZ-4590

    • Allow adhoc snapshot on tables whose schemas have not been captured DBZ-4903

    • Support Postgres 17 failover slots DBZ-8412

    • Improve error handling in dispatchSnapshotEvent of EventDispatcher DBZ-8433

    • Connector configuration logging improvement DBZ-8472

    • Handle un-parseable DDLs gracefully DBZ-8479

    • Track LogMiner partial rollback events in metrics DBZ-8491

    • Support JDBC offset/history configuration in CRD DBZ-8501

    Fixes

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

• Object ID cache may fail with concurrent modification exception DBZ-8465

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

• Edit Source/Destination: on adding new configuration properties it’s removing the old ones DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    Other changes

    • Add example for SSL-enabled Kafka DBZ-1937

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • Container Tests are executed with -DskipITs DBZ-8509

    • Add github workflow for UI unit testing on PRs DBZ-8526

    Release 3.0.4.Final (November 28th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Update the UI to pass on the backend URL at runtime from ENV Var while running the container image DBZ-8424

    • Add support for mysql_clear_password in mysql-binlog-connector DBZ-8445

    Fixes

    • Debezium db2i CDC source connector does not seem to pickup JOURNAL_ENTRY_TYPES ⇒ 'DR' records DBZ-8453

    • Randomly failing tests after migration to async engine DBZ-8461

    • Invalid label used for API service discriminator DBZ-8464

    Other changes

    • Migrate rest of the testsuite to async engine DBZ-7977

    • Update QOSDK to version 6.9.1 DBZ-8452

    • Add JDBC storage module in Debezium Server DBZ-8460

    Release 3.0.3.Final (November 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for bpchar datatype DBZ-8416

    • Allow parts of DS resource to reference values from primary in configuration DBZ-8431

    Fixes

    • Spanner tests fail randomly DBZ-8410

    • Engine shutdown may get stuck when error is thrown during connector stop DBZ-8414

    • JdbcOffsetBackingStore does not release lock of debezium_offset_storage gracefully DBZ-8423

    • Installation documentation typo on download link DBZ-8429

• Async engine fails with NPE when transformation returns null DBZ-8434

    • Snapshot completed flag not correctly saved on offsets DBZ-8449

    • Formatting characters render in descriptions of Oracle log.mining properties DBZ-8450

    • Prevent data corruption from netty version 4.1.111.Final DBZ-8438

    Other changes

    • Support config map offset store in the DS Operator DBZ-8352

    • Migrate Vitess testsuite to async engine DBZ-8377

    • Migrate Spanner testsuite to async engine DBZ-8381

    • Do not build images for unsupported database versions DBZ-8413

    • Update PatternFly version in UI from 6.beta to final 6.0 DBZ-8415

    • Fix the UI build issue DBZ-8435

    • Make AbstractConnectorTest#createEngine method abstract DBZ-8441

    Release 3.0.2.Final (November 15th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add file signal channel documentation to the signal channel chapter DBZ-7245

    • Improve blocking snapshot reliability in case of restart DBZ-7903

    • Allow skipping exceptions related to DML parser errors DBZ-8208

    • Ability to enable DS REST API in Operator CR DBZ-8234

    • Add feature to download and stream the Pipeline logs from UI DBZ-8239

    • Add support for vitess-connector to send DDL events DBZ-8325

    • Vstream table filter to match full table names DBZ-8354

    • RowsScanned JMX metric for MongoDB differs from relational connectors DBZ-8359

    • Refactor CassandraTypeProvider to not contain getClusterName method DBZ-8373

    • Possibility for Debezium Oracle Connector to accept NLS Time Format (For Date and Timestamp Columns) DBZ-8379

    • Provide config to allow for sending schema change events without historized schemas DBZ-8392

    • Implement new config map offset store in DS DBZ-8351

    Fixes

    • Race condition in stop-snapshot signal DBZ-8303

    • Debezium shifts binlog offset despite RabbitMQ Timeout and unconfirmed messages DBZ-8307

    • Debezium server with eventhubs sink type and eventhubs emulator connection string fails DBZ-8357

    • Filter for snapshot using signal doesn’t seem to work DBZ-8358

    • JDBC storage module does not use quay.io images DBZ-8362

    • Failure on offset store call to configure/start is logged at DEBUG level DBZ-8364

    • Object name is not in the list of S3 schema history fields DBZ-8366

    • Faulty "Failed to load mandatory config" error message DBZ-8367

    • Upgrade protobuf dependencies to avoid potential vulnerability DBZ-8371

    • Tests in IncrementalSnapshotIT may fail randomly DBZ-8386

    • ExtractNewRecordState transform: NPE when processing non-envelope records DBZ-8393

    • Oracle LogMiner metric OldestScnAgeInMilliseconds can be negative DBZ-8395

    • SqlServerConnectorIT.restartInTheMiddleOfTxAfterCompletedTx fails randomly DBZ-8396

    • ExtractNewDocumentStateTestIT fails randomly DBZ-8397

    • BlockingSnapshotIT fails on Oracle DBZ-8398

• Oracle OBJECT_ID lookup can cause high CPU and latency in Hybrid mining mode DBZ-8399

    • Protobuf plugin does not compile for PostgreSQL 17 on Debian DBZ-8403

    Other changes

    • Clarify signal data collection should be unique per connector DBZ-6837

    • Use DebeziumSinkRecord instead of Kafka Connect’s SinkRecord inside Debezium sink connectors DBZ-8346

    • Migrate SQL server testsuite to async engine DBZ-8353

    • Remove unnecessary converter code from parsers DBZ-8360

    • Deduplicate Cassandra Debezium tests DBZ-8363

    • Migrate MongoDB testsuite to async engine DBZ-8369

    • Migrate Oracle testsuite to async engine DBZ-8370

    • Add transform page to provide a single place to list the already configured transform plus UI to add a new transform DBZ-8374

    • Migrate rest of Debezium testsuite to async engine DBZ-8375

    • Migrate DB2 testsuite to async engine DBZ-8380

    • Migrate IBM i testsuite to async engine DBZ-8382

    • Upgrade Kafka to 3.8.1 DBZ-8385

    • Add Transform Edit and delete support. DBZ-8388

    • Log SCN existence check may throw ORA-01291 if a recent checkpoint occurred DBZ-8389

    • Upgrade Kafka to 3.9.0 DBZ-8400

    • Update Quarkus Outbox Extension to Quarkus 3.16.3 DBZ-8409

    Release 3.0.1.Final (October 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium no longer publishes container images to the Docker Hub (DBZ-8327).

    New features

    • Support batch write to AWS Kinesis DBZ-8193

    • Support for PostgreSQL 17 DBZ-8275

    • Extend Debezium Server to include support for application.yaml DBZ-8313

    • SQL Server Documentation for CDC on Server table DBZ-8314

    • Add support for MySQL 9.1 DBZ-8324

    • Support Cassandra 5.0 DBZ-8347

    Fixes

    • Oracle DDL parsing will fail if the DDL ends with a new line character DBZ-7040

    • Missing documentation for MongoDb SSL configuration DBZ-7927

    • Conditionalization implemented for single-sourcing MySQL/MariaDB content isn’t working as expected DBZ-8094

    • Debezium is replaying all events from an older offset DBZ-8194

    • Embedded MySqlConnector "Unable to find minimal snapshot lock mode" since 2.5.4.Final DBZ-8271

    • Reselect Post Processor not working when pkey of type uuid etc. DBZ-8277

    • BinlogStreamingChangeEventSource totalRecordCounter is never updated DBZ-8290

    • Restart Oracle connector when ORA-01001 invalid cursor exception is thrown DBZ-8292

    • Connector uses incorrect partition names when creating offsets DBZ-8298

    • ReselectPostProcessor fails when reselecting columns from Oracle DBZ-8304

    • Debezium MySQL DDL parser: SECONDARY_ENGINE=RAPID does not support DBZ-8305

    • Oracle DDL failure - subpartition list clause does not support in-memory clause DBZ-8315

    • DDL statement couldn’t be parsed DBZ-8316

• Binary Log Client doesn’t process the TRANSACTION_PAYLOAD header DBZ-8340

    • Oracle connector: archive.log.only.mode stop working after reach SYSDATE SCN DBZ-8345

    Other changes

    • Provide example for activity monitoring metrics DBZ-8174

    • Write blog post on how detect data mutation patterns with Debezium DBZ-8256

    • Formatting characters render literally in docs DBZ-8293

    • REST tests fail due to unable to execute cp DBZ-8294

    • Create MariaDB systemtests DBZ-8306

    • Refactor MySqlTests and MariaDBTests to share the tests via parent base class DBZ-8309

    • Document how to work with ServiceLoader and bundled jars DBZ-8318

    • Broken system tests for upstream DBZ-8326

    • Upstream system tests are stuck in Retrieving connector metrics DBZ-8330

    • Fix upstream JDBC system tests DBZ-8331

    • Add version for Cassandra 5 to debezium-build-parent DBZ-8348

    Release 3.0.0.Final (October 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

The deprecated additional-condition field of the execute-snapshot signal was removed. The field is fully replaced by the previously introduced additional-conditions field (DBZ-8278).
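Any tooling that still builds signal payloads with the old singular field needs to move to the array form. A sketch of the new payload shape, with the table name and filter as illustrative placeholders:

```java
public class ExecuteSnapshotPayload {
    // Old, removed form:  "additional-condition": "color = 'blue'"
    // New form: "additional-conditions" is an array of per-table conditions.
    public static String payload() {
        return """
            {
              "type": "incremental",
              "data-collections": ["inventory.products"],
              "additional-conditions": [
                {"data-collection": "inventory.products", "filter": "color = 'blue'"}
              ]
            }
            """;
    }
}
```

The payload shape is the same regardless of the channel used to deliver the signal.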

    New features

    • Add documentation for custom converters in PG DBZ-7820

    • Create REST bridge for DBZ signal channels DBZ-8101

• Support int/bigint arrays in reselect columns post-processors DBZ-8212

    • Log the record key when debezium fails to send the record to Kafka DBZ-8282

    Fixes

    • Custom convert (all to strings) and SQLServer default '0' type issue DBZ-7045

    • UnsupportedClassVersionError while running debezium-connector docker Image DBZ-7751

    • Error writing data to target database. (Caused by: java.lang.RuntimeException: org.postgresql.util.PSQLException: The column index is out of range: 140, number of columns: 139.) DBZ-8221

    • Debezium Server messages not being sent to Pub/Sub after restart DBZ-8236

    • An aborted ad-hoc blocking snapshot leaves the connector in a broken state DBZ-8244

    • JDBC Sink truncate event also add event to updateBufferByTable DBZ-8247

    • mysql-binlog-connector-java doesn’t compile with java 21 DBZ-8253

    • DDL statement couldn’t be parsed. 'mismatched input 'NOCACHE' expecting {'AS', 'USAGE', ';'} DBZ-8262

    • journal processing loops after journal offset reset DBZ-8265

    Other changes

    • Add async engine config options to server documentation DBZ-8133

    • Bump apicurio schema registry to 2.6.2.Final DBZ-8145

    • Correct description of the all_tables option for the PG publication.autocreate.mode property DBZ-8268

    • Test docs for productization and fix broken links and rendering errors DBZ-8284

    Release 3.0.0.CR2 (September 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.CR2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Snapshot isolation level options for postgres DBZ-1252

• Retry flush records if LockAcquisitionException occurred in MySQL DBZ-7291

    • Add support for MAX_STRING_SIZE set to EXTENDED DBZ-8039

    • Add invalid value logger for dates to Debezium Vitess Connector DBZ-8235

    • Support BLOB with EMPTY_BLOB() as default DBZ-8248

    Fixes

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    • JDBC sink test suite should use the debezium/connect:nightly image for e2e tests DBZ-8245

    • Performance Regression in Debezium Server Kafka after DBZ-7575 fix DBZ-8251

    • Error Prone library included in MySQL connector DBZ-8258

    • Debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-8259

    Other changes

    • Test and check compatibility with ojdbc11 DBZ-3658

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • Bump Infinispan to 15.0.8.Final DBZ-8246

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    • Add a note to the docs about JDBC batch retry configs DBZ-8252

    • Fix conditionalization in shared MariaDB/MySQL file DBZ-8254

    • Add Oracle FUTC license DBZ-8260

    • Remove Oracle libs from product assembly package DBZ-8261

    • debezium-connector-binlog does not need MariaDB dependency DBZ-8263

    • Provide subset package for Debezium Server DBZ-8264

    • Bump container images to Fedora 40 DBZ-8266

Release 3.0.0.CR1 (September 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

JMX signalling and notification did not work correctly for deployments with SQL Server configured for multiple tasks. To fix the issue it was necessary to change the naming of the signalling and notification MBeans to make them unique per task (DBZ-8137).
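Monitoring setups that looked these MBeans up by a fixed object name may need to account for the task id now being part of the name; the exact naming pattern is best taken from the connector documentation. The sketch below only shows how such beans can be discovered generically with a wildcard JMX query, and the debezium.sql_server domain is an assumption.

```java
import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class FindSignalMBeans {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Wildcard query: list every MBean in the (assumed) debezium.sql_server domain,
        // whatever task-specific key properties the new naming scheme adds.
        Set<ObjectName> names = server.queryNames(new ObjectName("debezium.sql_server:*"), null);
        names.forEach(name -> System.out.println(name.getCanonicalName()));
    }
}
```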

    Deprecated Oracle connector configuration options were removed from the project (DBZ-8181).

Vector datatype names introduced for PostgreSQL were too closely tied to PostgreSQL datatype naming. The type names were changed to more generic ones that are shared between PostgreSQL and MySQL (DBZ-8183).

    New features

    • Add support for MySQL 9 DBZ-8030

    • Add support for MySQL vector datatype DBZ-8157

    • Refactor engine signal support DBZ-8160

    • Add feature to inherit shard epoch DBZ-8163

    • Avoid 3 second delay in Oracle when one of the RAC nodes is offline DBZ-8177

    • Truncate byte buffer should return a new array DBZ-8189

    • Support for older MongoDb versions DBZ-8202

    • Add VECTOR functions to MySQL grammar DBZ-8210

    • Support MariaDB 11.4.3 DBZ-8226

    • Add information about helm chart installation to operator readme DBZ-8233

    Fixes

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist DBZ-7797

    • MySQL 8.4 incompatibility due to removed SQL commands DBZ-7838

    • Postgres connector - null value processing for "money" type column. DBZ-8027

    • Using snapshot.include.collection.list with Oracle raises NullPointerException DBZ-8032

• Performance degradation when reconstructing (log.mining.strategy hybrid mode) DBZ-8071

    • The source data type exceeds the debezium data type and cannot deserialize the object DBZ-8142

    • Incorrect use of generic types in tests DBZ-8166

    • Postgres JSONB Fields are not supported with Reselect Post Processor DBZ-8168

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine DBZ-8187

    • XStream may fail to attach on retry if previous attempt failed DBZ-8188

    • Exclude Oracle 23 VECSYS tablespace from capture DBZ-8198

    • AbstractProcessorTest uses an incorrect database name when run against Oracle 23 Free edition DBZ-8199

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • System testsuite fails with route name being too long DBZ-8213

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Using ehcache in Kafka connect throws an XMLConfiguration parse exception DBZ-8219

    • OcpJdbcSinkConnectorIT fails DBZ-8228

    • Container image does not install correct apicurio deps DBZ-8230

    Other changes

    • Documentation for signals provides incorrect data-collection format for some connectors DBZ-8090

    • Latest Informix JDBC Driver DBZ-8167

    • upgrade Adobe s3mock to version 3.10.0 DBZ-8169

    • Include Jackson libraries to JDBC connector Docker image distribution DBZ-8175

    • Ehcache fails to start, throwing "Invaild XML Configuration" DBZ-8178

• Enable snapshot.database.errors.max.retries during Oracle tests DBZ-8184

    • Change event for a logical decoding message doesn’t contain transaction field DBZ-8185

    • Add MariaDB connector server distribution DBZ-8186

    • Update Vitess example to Debezium 2.7/Vitess 19 DBZ-8196

    • OracleConnectorIT test shouldGracefullySkipObjectBasedTables can timeout prematurely DBZ-8197

    • Reduce log verbosity of OpenLogReplicator SCN confirmation DBZ-8201

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    Release 3.0.0.Beta1 (August 22nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

The Debezium Kafka sink could wait indefinitely in case of Kafka broker unavailability. Support for a configurable timeout was added, with a default timeout of 30 seconds (DBZ-7575).
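The dedicated sink timeout is a Debezium Server option whose exact property name should be taken from the Kafka sink documentation. Independently of that, the sink passes any debezium.sink.kafka.producer.* property straight to the Kafka producer, so the standard producer timeouts can be bounded as well; the sketch below uses the broker address as a placeholder.

```java
import java.util.Properties;

public class KafkaSinkProperties {
    // Hedged sketch of Debezium Server properties (normally kept in application.properties).
    public static Properties sinkProperties() {
        Properties props = new Properties();
        props.setProperty("debezium.sink.type", "kafka");
        props.setProperty("debezium.sink.kafka.producer.bootstrap.servers", "localhost:9092"); // placeholder
        // Standard Kafka producer settings that bound how long a send may block or retry,
        // complementing the 30-second default sink timeout introduced by DBZ-7575.
        props.setProperty("debezium.sink.kafka.producer.max.block.ms", "30000");
        props.setProperty("debezium.sink.kafka.producer.delivery.timeout.ms", "30000");
        return props;
    }
}
```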

The RabbitMQ native stream sink was sending all change messages into a single static stream. With the new default behaviour, changes are sent to a distinct stream for each table (DBZ-8118).

    New features

    • Implement Ehcache event buffer DBZ-7758

    • Expose a metric for number of create, update, delete events per table DBZ-8035

    • Log additional details about abandoned transactions DBZ-8044

    • Introduce timeout for replication slot creation DBZ-8073

    • ConverterBuilder doesn’t pass Headers to be manipulated DBZ-8082

    • Add SMT to decode binary content of a logical decoding message DBZ-8103

    • Support DECIMAL(p) Floating Point DBZ-8114

    • Support for PgVector datatypes DBZ-8121

    • Implement in process signal channel DBZ-8135

    • Validate log position method missing gtid info from SourceInfo DBZ-8140

    • Vitess Connector Epoch should support parallelism & shard changes DBZ-8154

    • Add an option for publication.autocreate.mode to create a publication with no tables DBZ-8156

    Fixes

    • Incremental snapshots don’t work with CloudEvent converter DBZ-7601

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Docs: connect-log4j.properties instead log4j.properties DBZ-8117

    • Recalculating mining range upper bounds causes getScnFromTimestamp to fail DBZ-8119

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] DBZ-8125

    • ConvertingFailureIT#shouldFailConversionTimeTypeWithConnectModeWhenFailMode fails randomly DBZ-8128

    • ibmi Connector does not take custom properties into account anymore DBZ-8129

• Unpredictable ordering of table rows during insertion causing foreign key error DBZ-8130

    • schema_only crashes ibmi Connector DBZ-8131

    • Support larger database.server.id values DBZ-8134

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile DBZ-8144

    • SchemaOnlyRecoverySnapshotter not registered as an SPI service implementation DBZ-8147

• When stopping the Oracle RAC node the Debezium server throws an exception - ORA-12514: Cannot connect to database and retries DBZ-8149

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput DBZ-8150

    • JDBC connector validation fails when using record_value with no primary.key.fields DBZ-8151

    • Taking RAC node offline and back online can lead to thread inconsistency DBZ-8162

    Other changes

    • MySQL has deprecated mysql_native_password usage DBZ-7049

    • Upgrade to Apicurio 2.5.8 or higher DBZ-7357

    • Write and publish Debezium Orchestra blog post DBZ-7972

    • Move Debezium Conductor repository under Debezium Organisation DBZ-7973

• Decide on name, jira components, etc. for Debezium Orchestra platform DBZ-7975

    • Migrate Postgres testsuite to async engine DBZ-8077

    • Conditionalize reference to the MySQL default value in description of schema.history.internal.store.only.captured.databases.ddl DBZ-8081

    • Bump Debezium Server to Quarkus 3.8.5 DBZ-8095

    • Converters documentation uses incorrect examples DBZ-8104

• Remove reference to `additional condition` signal parameter from ad hoc snapshots doc DBZ-8107

    • TimescaleDbDatabaseTest.shouldTransformCompressedChunks is failing DBZ-8123

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    • Use SQLSTATE to handle exceptions for replication slot creation command timeout DBZ-8127

    • Re-add check to test for if assembly profile is active DBZ-8138

    • Add LogMiner start mining session retry attempt counter to logs DBZ-8143

    • Reduce logging verbosity of XStream DML event data DBZ-8148

    • Upgrade Outbox Extension to Quarkus 3.14.0 DBZ-8164

    Release 3.0.0.Alpha2 (August 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

Debezium is now built with Kafka 3.8.0. There were a few changes in the Kafka internal APIs that Debezium uses. The codebase was modified to run with both pre-3.8.0 and 3.8.0 versions (DBZ-8105).

    New features

    • Add Status ObservedGeneration to Operator DBZ-8025

    • Support Custom Converters in Debezium Server DBZ-8040

    • Support FLOAT32 type in debezium-connector-spanner DBZ-8043

    • Debezium should auto exclude empty shards (no tablets) and not crash on keyspaces with empty shards DBZ-8053

    • Refactor LogMining implementation to allow alternative cache implementations DBZ-8054

    • Standard Webhooks signatures for HTTP sink DBZ-8063

    • Vitess-connector should provide a topic naming strategy that supports separate connectors per-table DBZ-8069

• Update third-party LICENSE with LGPL for MariaDB Connector/J DBZ-8099

    • Rabbitmq native stream Failed DBZ-8108

    Fixes

    • Embedded Infinispan tests fail to start with Java 23 DBZ-7840

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • StackOverflow exception on incremental snapshot DBZ-8011

    • JDBC primary.key.fields cannot be empty when i set insert.mode to upsert and primary.key.mode record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • Release process sets incorrect images for k8s for the next development version DBZ-8041

• Use recreate as (default) rollout strategy for deployments DBZ-8047

    • "Unexpected input: ." when snapshot incremental empty Database DBZ-8050

    • Debezium Operator Using RollingUpdate Strategy DBZ-8051

    • Debezium Operator Using RollingUpdate Strategy DBZ-8052

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Blocking snapshot can fail due to CommunicationsException DBZ-8058

    • FakeDNS not working with JDK version > 18 DBZ-8059

    • Debezium Operator with a provided Service Account doesn’t spin up deployment DBZ-8061

    • ParsingException (MySQL/MariaDB): rename table syntax DBZ-8066

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Standard Webhooks auth secret config value is not marked as PASSWORD_PATTERN DBZ-8078

    • Vitess transaction Epoch should not reset to zero when tx ID is missing DBZ-8087

• After changing the column datatype from int to float, Debezium fails to round it and I get a null value for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

    • NotificationIT tests seemingly seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

• Transformations are not closed in embedded engine DBZ-8106

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    Other changes

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • Write a blog post about async engine DBZ-8013

    • Test offset/history store configurations DBZ-8015

    • Upgrade postgres server version to 15 DBZ-8062

    • Disable DebeziumResourceNoTopicCreationIT - no longer compatible with Java 21 DBZ-8067

    • Speed-up PostgresShutdownIT DBZ-8075

    • Add MariaDB to debezium/connect image DBZ-8088

Release 3.0.0.Alpha1 (July 11th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium connectors now require Java 17 for runtime and Java 21 for building. Debezium Server, Debezium Operator, and Debezium Outbox extension require Java 21 both for build and runtime (DBZ-6795).

    New features

    • Provide MongoDB sink connector DBZ-7223

    • Extends process of finding Bundle path DBZ-7992

    • Support FLOAT32 type in debezium-connector-spanner DBZ-8043

    Fixes

    • Debezium postgres jdbc sink not handling infinity values DBZ-7920

    • JdbcSinkTask doesn’t clear offsets on stop DBZ-7946

    • ibmi as400 connector config isn’t prefixed with "database." DBZ-7955

    • Duplicate downstream annotation comments incorrectly refer to Db2 connector DBZ-7968

    • Issue with Hybrid mode and DDL change DBZ-7991

• Incorrect offset/history property mapping generated DBZ-8007

    • Debezium Server Operator on minikube with java.lang.NullPointerException': java.lang.NullPointerException DBZ-8019

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    Other changes

    • Use Java 17 as baseline DBZ-7224

    • Document new MariaDB connector DBZ-7786

    • Move to Maven 3.9.8 as build requirement DBZ-7965

    • Add disclaimer that PostProcessors and CustomConverters are Debezium source connectors only DBZ-8031

    • Typos in Bug report template DBZ-8038

    • Find an alternative way to manually deploy the connector with local changes that is compatible with Debezium 3 DBZ-8046

    \ No newline at end of file + Release Notes for Debezium 3.0

    Release Notes for Debezium 3.0

    All notable changes for Debezium releases are documented in this file. Release numbers follow Semantic Versioning.

    Release 3.0.6.Final (December 19th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.6.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.6.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.6.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    There are no new features in this release.

    Fixes

    Other changes

    There are no other changes in this release.

    Release 3.0.5.Final (December 18th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.5.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.5.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.5.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium used to reprocess signals upon connector restarts. This could lead to unpredictable behaviour and unintended side effects, so the feature was removed; if a connector stops, any pending signal must be re-sent (DBZ-7856).
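    For example, if a connector stops while an ad-hoc incremental snapshot is in progress, the signal now has to be issued again after the restart. The sketch below rebuilds such a signal for the SQL signaling channel; the signaling table debezium_signal and the table inventory.customers are placeholder names, not values taken from these notes.

        # Minimal sketch: re-send an execute-snapshot signal after a restart.
        # "debezium_signal" and "inventory.customers" are assumed placeholder names.
        import json, uuid

        payload = {"data-collections": ["inventory.customers"], "type": "incremental"}
        signal_sql = (
            "INSERT INTO debezium_signal (id, type, data) "
            f"VALUES ('{uuid.uuid4()}', 'execute-snapshot', '{json.dumps(payload)}')"
        )
        print(signal_sql)  # run this statement with your regular database client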

    The snapshot enumeration in the source info block schema was extended to cover all possible cases (DBZ-8496).

    New features

    • List all examples in root README.md of Debezium’s Example Repo DBZ-2535

    • Test the MS SQL Server Plugin with Transparent data encryption (TDE) DBZ-4590

    • Allow adhoc snapshot on tables whose schemas have not been captured DBZ-4903

    • Support Postgres 17 failover slots DBZ-8412

    • Improve error handling in dispatchSnapshotEvent of EventDispatcher DBZ-8433

    • Connector configuration logging improvement DBZ-8472

    • Handle un-parseable DDLs gracefully DBZ-8479

    • Track LogMiner partial rollback events in metrics DBZ-8491

    • Support JDBC offset/history configuration in CRD DBZ-8501

    Fixes

    • Error with debezium.sink.pulsar.client.serviceUrl and debezium-server DBZ-3720

    • MySQL regression - Defaults store.only.captured.tables.ddl to true DBZ-6709

    • ExtractNewRecordState value of optional null field which has default value DBZ-7094

    • DebeziumException: No column '' where ' found in table DBZ-8034

    • MySQL Connector Does Not Act On CREATE DATABASE Records In The Binlog DBZ-8291

    • Vgtid doesn’t contain multiple shard GTIDs when multiple tasks are used DBZ-8432

    • Object ID cache may fail with concurrent modification exception DBZ-8465

    • Oracle gathers and logs object attributes for views unnecessarily DBZ-8492

    • ReselectColumnPostProcessor can throw ORA-01003 "no statement parsed" when using fallback non-flashback area query DBZ-8493

    • Oracle DDL ALTER TABLE ADD CONSTRAINT fails to be parsed DBZ-8494

    • Editing Source/Destination to add new configuration properties removes the old ones DBZ-8495

    • Invalid property name in JDBC Schema History DBZ-8500

    • Fix the URL in Pipeline log page DBZ-8502

    • Failed to start LogMiner mining session due to "Required Start SCN" error message DBZ-8503

    • Oracle data pump TEMPLATE_TABLE clause not supported DBZ-8504

    • Postgres alpine images require lang/llvm 19 for build DBZ-8505

    • TimezoneConverter include.list should be respected if set DBZ-8514

    • Missing log classes debezium-platform-conductor DBZ-8515

    • Debezium Server fails to start when using the sink Kinesis DBZ-8517

    • Skip GoldenGate REPLICATION MARKER events DBZ-8533

    Other changes

    • Add example for SSL-enabled Kafka DBZ-1937

    • Create smoke test to make sure Debezium Server container image works DBZ-3226

    • Align MySQL and MariaDB grammars with upstream versions DBZ-8270

    • Support MongoDB 8.0 DBZ-8451

    • Update description of message.key.columns and format admonitions in PG doc DBZ-8455

    • Add Basic validation in UI to check for form completion before submitting. DBZ-8474

    • Use schema evolution tool to manage the conductor database DBZ-8486

    • Update Quarkus Outbox Extension to Quarkus 3.17.3 DBZ-8506

    • Merge conductor and stage into single platform repository DBZ-8508

    • Container Tests are executed with -DskipITs DBZ-8509

    • Add github workflow for UI unit testing on PRs DBZ-8526

    Release 3.0.4.Final (November 28th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.4.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.4.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.4.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Update the UI to pass on the backend URL at runtime from ENV Var while running the container image DBZ-8424

    • Add support for mysql_clear_password in mysql-binlog-connector DBZ-8445

    Fixes

    • Debezium db2i CDC source connector does not seem to pick up JOURNAL_ENTRY_TYPES ⇒ 'DR' records DBZ-8453

    • Randomly failing tests after migration to async engine DBZ-8461

    • Invalid label used for API service discriminator DBZ-8464

    Other changes

    • Migrate rest of the testsuite to async engine DBZ-7977

    • Update QOSDK to version 6.9.1 DBZ-8452

    • Add JDBC storage module in Debezium Server DBZ-8460

    Release 3.0.3.Final (November 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.3.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.3.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.3.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add support for bpchar datatype DBZ-8416

    • Allow parts of DS resource to reference values from primary in configuration DBZ-8431

    Fixes

    • Spanner tests fail randomly DBZ-8410

    • Engine shutdown may get stuck when error is thrown during connector stop DBZ-8414

    • JdbcOffsetBackingStore does not release lock of debezium_offset_storage gracefully DBZ-8423

    • Installation documentation typo on download link DBZ-8429

    • Async engine fails with NPE when transformation returns null DBZ-8434

    • Snapshot completed flag not correctly saved on offsets DBZ-8449

    • Formatting characters render in descriptions of Oracle log.mining properties DBZ-8450

    • Prevent data corruption from netty version 4.1.111.Final DBZ-8438

    Other changes

    • Support config map offset store in the DS Operator DBZ-8352

    • Migrate Vitess testsuite to async engine DBZ-8377

    • Migrate Spanner testsuite to async engine DBZ-8381

    • Do not build images for unsupported database versions DBZ-8413

    • Update PatternFly version in UI from 6.beta to final 6.0 DBZ-8415

    • Fix the UI build issue DBZ-8435

    • Make AbstractConnectorTest#createEngine method abstract DBZ-8441

    Release 3.0.2.Final (November 15th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.9.0 and has been tested with version 3.9.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.2.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.2.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.2.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Add file signal channel documentation to the signal channel chapter DBZ-7245

    • Improve blocking snapshot reliability in case of restart DBZ-7903

    • Allow skipping exceptions related to DML parser errors DBZ-8208

    • Ability to enable DS REST API in Operator CR DBZ-8234

    • Add feature to download and stream the Pipeline logs from UI DBZ-8239

    • Add support for vitess-connector to send DDL events DBZ-8325

    • Vstream table filter to match full table names DBZ-8354

    • RowsScanned JMX metric for MongoDB differs from relational connectors DBZ-8359

    • Refactor CassandraTypeProvider to not contain getClusterName method DBZ-8373

    • Possibility for Debezium Oracle Connector to accept NLS Time Format (For Date and Timestamp Columns) DBZ-8379

    • Provide config to allow for sending schema change events without historized schemas DBZ-8392

    • Implement new config map offset store in DS DBZ-8351

    Fixes

    • Race condition in stop-snapshot signal DBZ-8303

    • Debezium shifts binlog offset despite RabbitMQ Timeout and unconfirmed messages DBZ-8307

    • Debezium server with eventhubs sink type and eventhubs emulator connection string fails DBZ-8357

    • Filter for snapshot using signal doesn’t seem to work DBZ-8358

    • JDBC storage module does not use quay.io images DBZ-8362

    • Failure on offset store call to configure/start is logged at DEBUG level DBZ-8364

    • Object name is not in the list of S3 schema history fields DBZ-8366

    • Faulty "Failed to load mandatory config" error message DBZ-8367

    • Upgrade protobuf dependencies to avoid potential vulnerability DBZ-8371

    • Tests in IncrementalSnapshotIT may fail randomly DBZ-8386

    • ExtractNewRecordState transform: NPE when processing non-envelope records DBZ-8393

    • Oracle LogMiner metric OldestScnAgeInMilliseconds can be negative DBZ-8395

    • SqlServerConnectorIT.restartInTheMiddleOfTxAfterCompletedTx fails randomly DBZ-8396

    • ExtractNewDocumentStateTestIT fails randomly DBZ-8397

    • BlockingSnapshotIT fails on Oracle DBZ-8398

    • Oracle OBJECT_ID lookup can cause high CPU and latency in Hybrid mining mode DBZ-8399

    • Protobuf plugin does not compile for PostgreSQL 17 on Debian DBZ-8403

    Other changes

    • Clarify signal data collection should be unique per connector DBZ-6837

    • Use DebeziumSinkRecord instead of Kafka Connect’s SinkRecord inside Debezium sink connectors DBZ-8346

    • Migrate SQL server testsuite to async engine DBZ-8353

    • Remove unnecessary converter code from parsers DBZ-8360

    • Deduplicate Cassandra Debezium tests DBZ-8363

    • Migrate MongoDB testsuite to async engine DBZ-8369

    • Migrate Oracle testsuite to async engine DBZ-8370

    • Add transform page to provide a single place to list the already configured transform plus UI to add a new transform DBZ-8374

    • Migrate rest of Debezium testsuite to async engine DBZ-8375

    • Migrate DB2 testsuite to async engine DBZ-8380

    • Migrate IBM i testsuite to async engine DBZ-8382

    • Upgrade Kafka to 3.8.1 DBZ-8385

    • Add Transform Edit and delete support. DBZ-8388

    • Log SCN existence check may throw ORA-01291 if a recent checkpoint occurred DBZ-8389

    • Upgrade Kafka to 3.9.0 DBZ-8400

    • Update Quarkus Outbox Extension to Quarkus 3.16.3 DBZ-8409

    Release 3.0.1.Final (October 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.1.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.1.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.1.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium no longer publishes container images to the Docker Hub (DBZ-8327).
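    In practice this only changes where images are pulled from. The sketch below shows the switch for the Connect image, assuming the Docker CLI is available locally; the tag is an example only.

        # Minimal sketch: pull the Connect image from Quay.io instead of Docker Hub.
        # Requires a local Docker CLI; the tag is an example only.
        import subprocess

        # Old location (no longer updated): docker.io/debezium/connect
        subprocess.run(["docker", "pull", "quay.io/debezium/connect:3.0.1.Final"], check=True)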

    New features

    • Support batch write to AWS Kinesis DBZ-8193

    • Support for PostgreSQL 17 DBZ-8275

    • Extend Debezium Server to include support for application.yaml DBZ-8313

    • SQL Server Documentation for CDC on Server table DBZ-8314

    • Add support for MySQL 9.1 DBZ-8324

    • Support Cassandra 5.0 DBZ-8347

    Fixes

    • Oracle DDL parsing will fail if the DDL ends with a new line character DBZ-7040

    • Missing documentation for MongoDb SSL configuration DBZ-7927

    • Conditionalization implemented for single-sourcing MySQL/MariaDB content isn’t working as expected DBZ-8094

    • Debezium is replaying all events from an older offset DBZ-8194

    • Embedded MySqlConnector "Unable to find minimal snapshot lock mode" since 2.5.4.Final DBZ-8271

    • Reselect Post Processor not working when pkey of type uuid etc. DBZ-8277

    • BinlogStreamingChangeEventSource totalRecordCounter is never updated DBZ-8290

    • Restart Oracle connector when ORA-01001 invalid cursor exception is thrown DBZ-8292

    • Connector uses incorrect partition names when creating offsets DBZ-8298

    • ReselectPostProcessor fails when reselecting columns from Oracle DBZ-8304

    • Debezium MySQL DDL parser: SECONDARY_ENGINE=RAPID is not supported DBZ-8305

    • Oracle DDL failure - subpartition list clause does not support in-memory clause DBZ-8315

    • DDL statement couldn’t be parsed DBZ-8316

    • Binary Log Client doesn’t process the TRANSACTION_PAYLOAD header DBZ-8340

    • Oracle connector: archive.log.only.mode stops working after reaching the SYSDATE SCN DBZ-8345

    Other changes

    • Provide example for activity monitoring metrics DBZ-8174

    • Write blog post on how to detect data mutation patterns with Debezium DBZ-8256

    • Formatting characters render literally in docs DBZ-8293

    • REST tests fail due to unable to execute cp DBZ-8294

    • Create MariaDB systemtests DBZ-8306

    • Refactor MySqlTests and MariaDBTests to share the tests via parent base class DBZ-8309

    • Document how to work with ServiceLoader and bundled jars DBZ-8318

    • Broken system tests for upstream DBZ-8326

    • Upstream system tests are stuck in Retrieving connector metrics DBZ-8330

    • Fix upstream JDBC system tests DBZ-8331

    • Add version for Cassandra 5 to debezium-build-parent DBZ-8348

    Release 3.0.0.Final (October 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Final from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Final plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Final connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The deprecated additional-condition field of the execute-snapshot signal was removed. It is fully replaced by the previously introduced additional-conditions field (DBZ-8278).
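    A sketch of the new payload shape is shown below; the collection name and filter expression are illustrative placeholders, not values from this release.

        # Minimal sketch of an execute-snapshot signal payload that uses the
        # "additional-conditions" list; "inventory.orders" and the filter are
        # illustrative placeholders.
        import json

        data = {
            "data-collections": ["inventory.orders"],
            "type": "incremental",
            "additional-conditions": [
                {"data-collection": "inventory.orders", "filter": "status = 'PENDING'"}
            ],
        }
        print(json.dumps(data, indent=2))  # value for the signal's "data" field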

    New features

    • Add documentation for custom converters in PG DBZ-7820

    • Create REST bridge for DBZ signal channels DBZ-8101

    • Support int/bigint arrays in reselect columns post processors DBZ-8212

    • Log the record key when debezium fails to send the record to Kafka DBZ-8282

    Fixes

    • Custom converter (all to strings) and SQL Server default '0' type issue DBZ-7045

    • UnsupportedClassVersionError while running debezium-connector docker Image DBZ-7751

    • Error writing data to target database. (Caused by: java.lang.RuntimeException: org.postgresql.util.PSQLException: The column index is out of range: 140, number of columns: 139.) DBZ-8221

    • Debezium Server messages not being sent to Pub/Sub after restart DBZ-8236

    • An aborted ad-hoc blocking snapshot leaves the connector in a broken state DBZ-8244

    • JDBC Sink truncate event also adds the event to updateBufferByTable DBZ-8247

    • mysql-binlog-connector-java doesn’t compile with java 21 DBZ-8253

    • DDL statement couldn’t be parsed. 'mismatched input 'NOCACHE' expecting {'AS', 'USAGE', ';'} DBZ-8262

    • journal processing loops after journal offset reset DBZ-8265

    Other changes

    • Add async engine config options to server documentation DBZ-8133

    • Bump apicurio schema registry to 2.6.2.Final DBZ-8145

    • Correct description of the all_tables option for the PG publication.autocreate.mode property DBZ-8268

    • Test docs for productization and fix broken links and rendering errors DBZ-8284

    Release 3.0.0.CR2 (September 25th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.CR2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.CR2 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.CR2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    There are no breaking changes in this release.

    New features

    • Snapshot isolation level options for postgres DBZ-1252

    • Retry flush records if LockAcquisitionException occurred in MySQL DBZ-7291

    • Add support for MAX_STRING_SIZE set to EXTENDED DBZ-8039

    • Add invalid value logger for dates to Debezium Vitess Connector DBZ-8235

    • Support BLOB with EMPTY_BLOB() as default DBZ-8248

    Fixes

    • Debezium does not restart automatically after throwing an ORA-00600 krvrdccs30 error DBZ-8223

    • JDBC sink doesn’t include fields as per documentation DBZ-8224

    • Unbounded number of processing threads in async engine DBZ-8237

    • Streaming metrics are stuck after an ad-hoc blocking snapshot DBZ-8238

    • DDL statement couldn’t be parsed with IF EXISTS DBZ-8240

    • Random engine factory used by default DBZ-8241

    • JDBC sink test suite should use the debezium/connect:nightly image for e2e tests DBZ-8245

    • Performance Regression in Debezium Server Kafka after DBZ-7575 fix DBZ-8251

    • Error Prone library included in MySQL connector DBZ-8258

    • Debezium.text.ParsingException: DDL statement couldn’t be parsed DBZ-8259

    Other changes

    • Test and check compatibility with ojdbc11 DBZ-3658

    • Broken link to Streams doc about configuring logging DBZ-8231

    • Document passthrough hibernate.* properties for the JDBC connector DBZ-8232

    • Bump Infinispan to 15.0.8.Final DBZ-8246

    • AbstractConnectorTest consumeRecordsUntil may prematurely exit loop DBZ-8250

    • Add a note to the docs about JDBC batch retry configs DBZ-8252

    • Fix conditionalization in shared MariaDB/MySQL file DBZ-8254

    • Add Oracle FUTC license DBZ-8260

    • Remove Oracle libs from product assembly package DBZ-8261

    • debezium-connector-binlog does not need MariaDB dependency DBZ-8263

    • Provide subset package for Debezium Server DBZ-8264

    • Bump container images to Fedora 40 DBZ-8266

    Release 3.0.0.CR1 (September 13th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.CR1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.CR1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.CR1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    JMX signalling and notification did not work correctly for deployments with SQL Server configured for multiple tasks. To fix the issue, the naming of the signalling and notification MBeans was changed to make them unique per task (DBZ-8137).

    Deprecated Oracle connector configuration options were removed from the project (DBZ-8181).

    The vector datatype names introduced for PostgreSQL were too closely tied to PostgreSQL's datatype naming. The type names were changed to more generic ones that are shared between PostgreSQL and MySQL (DBZ-8183).

    New features

    • Add support for MySQL 9 DBZ-8030

    • Add support for MySQL vector datatype DBZ-8157

    • Refactor engine signal support DBZ-8160

    • Add feature to inherit shard epoch DBZ-8163

    • Avoid 3 second delay in Oracle when one of the RAC nodes is offline DBZ-8177

    • Truncate byte buffer should return a new array DBZ-8189

    • Support for older MongoDb versions DBZ-8202

    • Add VECTOR functions to MySQL grammar DBZ-8210

    • Support MariaDB 11.4.3 DBZ-8226

    • Add information about helm chart installation to operator readme DBZ-8233

    Fixes

    • Make ORA-00600 - krvrdccs10 automatically retriable DBZ-5009

    • Incremental snapshot fails with NPE if surrogate key doesn’t exist DBZ-7797

    • MySQL 8.4 incompatibility due to removed SQL commands DBZ-7838

    • Postgres connector - null value processing for "money" type column. DBZ-8027

    • Using snapshot.include.collection.list with Oracle raises NullPointerException DBZ-8032

    • Performance degradation when reconstructing (log.mining.strategy hybrid mode) DBZ-8071

    • The source data type exceeds the debezium data type and cannot deserialize the object DBZ-8142

    • Incorrect use of generic types in tests DBZ-8166

    • Postgres JSONB Fields are not supported with Reselect Post Processor DBZ-8168

    • NullPointerException (schemaUpdateCache is null) when restarting Oracle engine DBZ-8187

    • XStream may fail to attach on retry if previous attempt failed DBZ-8188

    • Exclude Oracle 23 VECSYS tablespace from capture DBZ-8198

    • AbstractProcessorTest uses an incorrect database name when run against Oracle 23 Free edition DBZ-8199

    • DDL statement couldn’t be parsed: REVOKE IF EXISTS DBZ-8209

    • System testsuite fails with route name being too long DBZ-8213

    • Oracle TableSchemaBuilder provides wrong column name in error message DBZ-8217

    • Using ehcache in Kafka connect throws an XMLConfiguration parse exception DBZ-8219

    • OcpJdbcSinkConnectorIT fails DBZ-8228

    • Container image does not install correct apicurio deps DBZ-8230

    Other changes

    • Documentation for signals provides incorrect data-collection format for some connectors DBZ-8090

    • Latest Informix JDBC Driver DBZ-8167

    • Upgrade Adobe s3mock to version 3.10.0 DBZ-8169

    • Include Jackson libraries to JDBC connector Docker image distribution DBZ-8175

    • Ehcache fails to start, throwing "Invalid XML Configuration" DBZ-8178

    • Enable snapshot.database.errors.max.retries during Oracle tests DBZ-8184

    • Change event for a logical decoding message doesn’t contain transaction field DBZ-8185

    • Add MariaDB connector server distribution DBZ-8186

    • Update Vitess example to Debezium 2.7/Vitess 19 DBZ-8196

    • OracleConnectorIT test shouldGracefullySkipObjectBasedTables can timeout prematurely DBZ-8197

    • Reduce log verbosity of OpenLogReplicator SCN confirmation DBZ-8201

    • Implement separate source and sink connector sections in documentation navigation DBZ-8220

    Release 3.0.0.Beta1 (August 22nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Beta1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Beta1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Beta1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    The Debezium Kafka sink could wait indefinitely when the Kafka broker was unavailable. Support for a configurable timeout was added, and the default behaviour is to time out after 30 seconds (DBZ-7575).

    The RabbitMQ native stream sink was sending all change messages into a single static stream. With the new default behaviour, changes are sent to a distinct stream for each table (DBZ-8118).

    New features

    • Implement Ehcache event buffer DBZ-7758

    • Expose a metric for number of create, update, delete events per table DBZ-8035

    • Log additional details about abandoned transactions DBZ-8044

    • Introduce timeout for replication slot creation DBZ-8073

    • ConverterBuilder doesn’t pass Headers to be manipulated DBZ-8082

    • Add SMT to decode binary content of a logical decoding message DBZ-8103

    • Support DECIMAL(p) Floating Point DBZ-8114

    • Support for PgVector datatypes DBZ-8121

    • Implement in process signal channel DBZ-8135

    • Validate log position method missing gtid info from SourceInfo DBZ-8140

    • Vitess Connector Epoch should support parallelism & shard changes DBZ-8154

    • Add an option for publication.autocreate.mode to create a publication with no tables DBZ-8156

    Fixes

    • Incremental snapshots don’t work with CloudEvent converter DBZ-7601

    • Snapshot retrying logic falls into infinite retry loop DBZ-7860

    • Primary Key Update/ Snapshot Race Condition DBZ-8113

    • Docs: connect-log4j.properties instead of log4j.properties DBZ-8117

    • Recalculating mining range upper bounds causes getScnFromTimestamp to fail DBZ-8119

    • ORA-00600: internal error code, arguments: [krvrdGetUID:2], [18446744073709551614], [], [], [], [], [], [], [], [], [], [] DBZ-8125

    • ConvertingFailureIT#shouldFailConversionTimeTypeWithConnectModeWhenFailMode fails randomly DBZ-8128

    • ibmi Connector does not take custom properties into account anymore DBZ-8129

    • Unpredictable ordering of table rows during insertion causing foreign key error DBZ-8130

    • schema_only crashes ibmi Connector DBZ-8131

    • Support larger database.server.id values DBZ-8134

    • Open redo thread consistency check can lead to ORA-01291 - missing logfile DBZ-8144

    • SchemaOnlyRecoverySnapshotter not registered as an SPI service implementation DBZ-8147

    • When stopping an Oracle RAC node, the Debezium server throws an exception - ORA-12514: Cannot connect to database - and retries DBZ-8149

    • Issue with Debezium Snapshot: DateTimeParseException with plugin pgoutput DBZ-8150

    • JDBC connector validation fails when using record_value with no primary.key.fields DBZ-8151

    • Taking RAC node offline and back online can lead to thread inconsistency DBZ-8162

    Other changes

    • MySQL has deprecated mysql_native_password usage DBZ-7049

    • Upgrade to Apicurio 2.5.8 or higher DBZ-7357

    • Write and publish Debezium Orchestra blog post DBZ-7972

    • Move Debezium Conductor repository under Debezium Organisation DBZ-7973

    • Decide on name, Jira components, etc. for Debezium Orchestra platform DBZ-7975

    • Migrate Postgres testsuite to async engine DBZ-8077

    • Conditionalize reference to the MySQL default value in description of schema.history.internal.store.only.captured.databases.ddl DBZ-8081

    • Bump Debezium Server to Quarkus 3.8.5 DBZ-8095

    • Converters documentation uses incorrect examples DBZ-8104

    • Remove reference to `additional condition` signal parameter from ad hoc snapshots doc DBZ-8107

    • TimescaleDbDatabaseTest.shouldTransformCompressedChunks is failing DBZ-8123

    • Update Oracle connector doc to describe options for restricting access permissions for the Debezium LogMiner user DBZ-8124

    • Use SQLSTATE to handle exceptions for replication slot creation command timeout DBZ-8127

    • Re-add check to test for if assembly profile is active DBZ-8138

    • Add LogMiner start mining session retry attempt counter to logs DBZ-8143

    • Reduce logging verbosity of XStream DML event data DBZ-8148

    • Upgrade Outbox Extension to Quarkus 3.14.0 DBZ-8164

    Release 3.0.0.Alpha2 (August 2nd 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.8.0 and has been tested with version 3.8.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Alpha2 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Alpha2 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Alpha2 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium is now built with Kafka 3.8.0. There were a few changes in the Kafka internal APIs that Debezium uses, and the codebase was modified to run with both pre-3.8.0 and 3.8.0 versions (DBZ-8105).

    New features

    • Add Status ObservedGeneration to Operator DBZ-8025

    • Support Custom Converters in Debezium Server DBZ-8040

    • Support FLOAT32 type in debezium-connector-spanner DBZ-8043

    • Debezium should auto exclude empty shards (no tablets) and not crash on keyspaces with empty shards DBZ-8053

    • Refactor LogMining implementation to allow alternative cache implementations DBZ-8054

    • Standard Webhooks signatures for HTTP sink DBZ-8063

    • Vitess-connector should provide a topic naming strategy that supports separate connectors per-table DBZ-8069

    • Update third-party LICENSE with LGPL for MariaDB Connector/J DBZ-8099

    • RabbitMQ native stream failed DBZ-8108

    Fixes

    • Embedded Infinispan tests fail to start with Java 23 DBZ-7840

    • Clarify that Oracle connector does not read from physical standby DBZ-7895

    • StackOverflow exception on incremental snapshot DBZ-8011

    • JDBC primary.key.fields cannot be empty when insert.mode is set to upsert and primary.key.mode to record_value DBZ-8018

    • Unable to acquire buffer lock, buffer queue is likely full DBZ-8022

    • Release process sets incorrect images for k8s for the next development version DBZ-8041

    • Use recreate as (default) rollout strategy for deployments DBZ-8047

    • "Unexpected input: ." when snapshot incremental empty Database DBZ-8050

    • Debezium Operator Using RollingUpdate Strategy DBZ-8051

    • Debezium Operator Using RollingUpdate Strategy DBZ-8052

    • Oracle connector inconsistency in redo log switches DBZ-8055

    • Blocking snapshot can fail due to CommunicationsException DBZ-8058

    • FakeDNS not working with JDK version > 18 DBZ-8059

    • Debezium Operator with a provided Service Account doesn’t spin up deployment DBZ-8061

    • ParsingException (MySQL/MariaDB): rename table syntax DBZ-8066

    • Oracle histogram metrics are no longer printed in logs correctly DBZ-8068

    • In hybrid log.mining.strategy reconstruction logs should be set to DEBUG DBZ-8070

    • Support capturing BLOB column types during snapshot for MySQL/MariaDB DBZ-8076

    • Standard Webhooks auth secret config value is not marked as PASSWORD_PATTERN DBZ-8078

    • Vitess transaction Epoch should not reset to zero when tx ID is missing DBZ-8087

    • After changing a column datatype from int to float, Debezium fails to round it and a null value appears for this field in the stream DBZ-8089

    • MySQL and MariaDB keyword YES cannot be parsed as a column name DBZ-8092

    • NotificationIT tests seem to fail due to stepping on one another DBZ-8100

    • ORA-26928 - Unable to communicate with XStream apply coordinator process should be retriable DBZ-8102

    • Transformations are not closed in embedded engine DBZ-8106

    • Don’t close connection after loading timescale metadata in TimescaleDb SMT DBZ-8109

    Other changes

    • Bump Infinispan to 14.0.29.Final DBZ-8010

    • Write a blog post about async engine DBZ-8013

    • Test offset/history store configurations DBZ-8015

    • Upgrade postgres server version to 15 DBZ-8062

    • Disable DebeziumResourceNoTopicCreationIT - no longer compatible with Java 21 DBZ-8067

    • Speed-up PostgresShutdownIT DBZ-8075

    • Add MariaDB to debezium/connect image DBZ-8088

    Release 3.0.0.Alpha1 (July 11th 2024)

    Kafka compatibility

    This release has been built against Kafka Connect 3.7.0 and has been tested with version 3.7.0 of the Kafka brokers. See the Kafka documentation for compatibility with other versions of Kafka brokers.

    Upgrading

    Before upgrading any connector, be sure to check the backward-incompatible changes that have been made since the release you were using.

    When you decide to upgrade one of these connectors to 3.0.0.Alpha1 from any earlier versions, first check the migration notes for the version you’re using. Gracefully stop the running connector, remove the old plugin files, install the 3.0.0.Alpha1 plugin files, and restart the connector using the same configuration. Upon restart, the 3.0.0.Alpha1 connectors will continue where the previous connector left off. As one might expect, all change events previously written to Kafka by the old connector will not be modified.

    If you are using our container images, then please do not forget to pull them fresh from Quay.io.

    Breaking changes

    Debezium connectors now require Java 17 for runtime and Java 21 for building. Debezium Server, Debezium Operator, and Debezium Outbox extension require Java 21 both for build and runtime (DBZ-6795).
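    A quick way to confirm a host meets the new runtime baseline before deploying a connector is sketched below; it assumes the java binary is on the PATH.

        # Minimal sketch: check that the local JVM meets the Java 17 runtime baseline.
        # Assumes "java" is on the PATH; adjust the threshold to 21 for Debezium Server,
        # the Operator, or the Outbox extension.
        import re, subprocess

        out = subprocess.run(["java", "-version"], capture_output=True, text=True).stderr
        major = int(re.search(r'version "(\d+)', out).group(1))
        if major < 17:
            raise SystemExit(f"Debezium 3.x connectors require Java 17+, found {major}")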

    New features

    • Provide MongoDB sink connector DBZ-7223

    • Extend the process of finding the Bundle path DBZ-7992

    • Support FLOAT32 type in debezium-connector-spanner DBZ-8043

    Fixes

    • Debezium postgres jdbc sink not handling infinity values DBZ-7920

    • JdbcSinkTask doesn’t clear offsets on stop DBZ-7946

    • ibmi as400 connector config isn’t prefixed with "database." DBZ-7955

    • Duplicate downstream annotation comments incorrectly refer to Db2 connector DBZ-7968

    • Issue with Hybrid mode and DDL change DBZ-7991

    • Incorrect offset/history property mapping generated DBZ-8007

    • Debezium Server Operator on minikube fails with java.lang.NullPointerException DBZ-8019

    • ORA-65090: operation only allowed in a container database when connecting to a non-CDB database DBZ-8023

    • Added type to Prometheus JMX exporter DBZ-8036

    • Add kafka.producer metrics to debezium-server jmx exporter config DBZ-8037

    Other changes

    • Use Java 17 as baseline DBZ-7224

    • Document new MariaDB connector DBZ-7786

    • Move to Maven 3.9.8 as build requirement DBZ-7965

    • Add disclaimer that PostProcessors and CustomConverters are Debezium source connectors only DBZ-8031

    • Typos in Bug report template DBZ-8038

    • Find an alternative way to manually deploy the connector with local changes that is compatible with Debezium 3 DBZ-8046

    \ No newline at end of file
    diff --git a/releases/index.html b/releases/index.html
    index d6ddb84bb7..f9db74b5b4 100644
    --- a/releases/index.html
    +++ b/releases/index.html
    @@ -1 +1 @@
    - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    diff --git a/releases/page/10/index.html b/releases/page/10/index.html
    index 5fc17e01bd..f0a5c03807 100644
    --- a/releases/page/10/index.html
    +++ b/releases/page/10/index.html
    @@ -1 +1 @@
    - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    diff --git a/releases/page/11/index.html b/releases/page/11/index.html
    index 01e7bcd550..d02f2b4f77 100644
    --- a/releases/page/11/index.html
    +++ b/releases/page/11/index.html
    @@ -1 +1 @@
    - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file
    diff --git a/releases/page/12/index.html b/releases/page/12/index.html
    index 65d1b009b2..1e4cfdcc2a 100644
    --- a/releases/page/12/index.html
    +++ b/releases/page/12/index.html
    @@ -1 +1 @@
    - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/13/index.html b/releases/page/13/index.html index 71b9de3d3c..a32f3b958e 100644 --- a/releases/page/13/index.html +++ b/releases/page/13/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/14/index.html b/releases/page/14/index.html index 69bed5ab7a..c52961ef48 100644 --- a/releases/page/14/index.html +++ b/releases/page/14/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/15/index.html b/releases/page/15/index.html index f285e8b315..d55e860b81 100644 --- a/releases/page/15/index.html +++ b/releases/page/15/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/16/index.html b/releases/page/16/index.html index bea9abbeb9..99c0b3f88f 100644 --- a/releases/page/16/index.html +++ b/releases/page/16/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/17/index.html b/releases/page/17/index.html index 02ddee865c..9548c56f02 100644 --- a/releases/page/17/index.html +++ b/releases/page/17/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/18/index.html b/releases/page/18/index.html index 30cb6ae021..7d1b7819dc 100644 --- a/releases/page/18/index.html +++ b/releases/page/18/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/19/index.html b/releases/page/19/index.html index c70d249e88..d1d2ab008f 100644 --- a/releases/page/19/index.html +++ b/releases/page/19/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/2/index.html b/releases/page/2/index.html index 828b73e317..9d1efa9ed0 100644 --- a/releases/page/2/index.html +++ b/releases/page/2/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/20/index.html b/releases/page/20/index.html index bd9eae4fcb..029c2da6bf 100644 --- a/releases/page/20/index.html +++ b/releases/page/20/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/21/index.html b/releases/page/21/index.html index ea61af54de..5ca4df79ee 100644 --- a/releases/page/21/index.html +++ b/releases/page/21/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/22/index.html b/releases/page/22/index.html index 1e9d85ba35..c5ee9fcefa 100644 --- a/releases/page/22/index.html +++ b/releases/page/22/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/23/index.html b/releases/page/23/index.html index 52533aead7..f412cc1b0c 100644 --- a/releases/page/23/index.html +++ b/releases/page/23/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/24/index.html b/releases/page/24/index.html index 5bdc07cfbf..5ef06a5a5d 100644 --- a/releases/page/24/index.html +++ b/releases/page/24/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/25/index.html b/releases/page/25/index.html index c408572a95..9e2dcf1e84 100644 --- a/releases/page/25/index.html +++ b/releases/page/25/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/26/index.html b/releases/page/26/index.html index e18947e9f4..16c9f8f1ab 100644 --- a/releases/page/26/index.html +++ b/releases/page/26/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/27/index.html b/releases/page/27/index.html index 8c7d9085a7..8e97701f70 100644 --- a/releases/page/27/index.html +++ b/releases/page/27/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/28/index.html b/releases/page/28/index.html index f1a577daaf..fe0b338ef8 100644 --- a/releases/page/28/index.html +++ b/releases/page/28/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/29/index.html b/releases/page/29/index.html index 019e47ca3d..285ad806db 100644 --- a/releases/page/29/index.html +++ b/releases/page/29/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/3/index.html b/releases/page/3/index.html index b571fee490..54035dfda1 100644 --- a/releases/page/3/index.html +++ b/releases/page/3/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/30/index.html b/releases/page/30/index.html index 0cdc62874d..afd4df417b 100644 --- a/releases/page/30/index.html +++ b/releases/page/30/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/31/index.html b/releases/page/31/index.html index 2c79fee9df..a5aed70c6a 100644 --- a/releases/page/31/index.html +++ b/releases/page/31/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/32/index.html b/releases/page/32/index.html index 930ffab385..292c35ebc7 100644 --- a/releases/page/32/index.html +++ b/releases/page/32/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/33/index.html b/releases/page/33/index.html index 1035fd2fac..f8387c2eab 100644 --- a/releases/page/33/index.html +++ b/releases/page/33/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/34/index.html b/releases/page/34/index.html index a478028ddb..0c905ef24b 100644 --- a/releases/page/34/index.html +++ b/releases/page/34/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/35/index.html b/releases/page/35/index.html index 392c5853ca..829ce5b58e 100644 --- a/releases/page/35/index.html +++ b/releases/page/35/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/36/index.html b/releases/page/36/index.html index 9bd3432f26..4d11b8df1c 100644 --- a/releases/page/36/index.html +++ b/releases/page/36/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/37/index.html b/releases/page/37/index.html index f064dc3813..185cdc3d43 100644 --- a/releases/page/37/index.html +++ b/releases/page/37/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/38/index.html b/releases/page/38/index.html index de5c963b9a..a3aca9db68 100644 --- a/releases/page/38/index.html +++ b/releases/page/38/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/39/index.html b/releases/page/39/index.html index ab0363f630..6921b486cc 100644 --- a/releases/page/39/index.html +++ b/releases/page/39/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/4/index.html b/releases/page/4/index.html index 1f1b309c50..f80e933788 100644 --- a/releases/page/4/index.html +++ b/releases/page/4/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/40/index.html b/releases/page/40/index.html index a30369b6fe..f1503cc129 100644 --- a/releases/page/40/index.html +++ b/releases/page/40/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports addtional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/41/index.html b/releases/page/41/index.html index bfd3d5f76e..aac4ba1f57 100644 --- a/releases/page/41/index.html +++ b/releases/page/41/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/42/index.html b/releases/page/42/index.html index 5d54b7a054..6af26e6709 100644 --- a/releases/page/42/index.html +++ b/releases/page/42/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/43/index.html b/releases/page/43/index.html index 1edf110868..9512893368 100644 --- a/releases/page/43/index.html +++ b/releases/page/43/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/44/index.html b/releases/page/44/index.html index bd27b840e0..7713aff16e 100644 --- a/releases/page/44/index.html +++ b/releases/page/44/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/45/index.html b/releases/page/45/index.html index 84433d4e54..641bc10b4b 100644 --- a/releases/page/45/index.html +++ b/releases/page/45/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/46/index.html b/releases/page/46/index.html index e7a3cfcf66..b0d9c0bd2f 100644 --- a/releases/page/46/index.html +++ b/releases/page/46/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/47/index.html b/releases/page/47/index.html index b8ab46b7b1..c844328faa 100644 --- a/releases/page/47/index.html +++ b/releases/page/47/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/48/index.html b/releases/page/48/index.html index 84d4339c83..ead3c5bcbc 100644 --- a/releases/page/48/index.html +++ b/releases/page/48/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/49/index.html b/releases/page/49/index.html index 30461c68e5..9240685683 100644 --- a/releases/page/49/index.html +++ b/releases/page/49/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file diff --git a/releases/page/5/index.html b/releases/page/5/index.html index 9a3234fb50..a420334962 100644 --- a/releases/page/5/index.html +++ b/releases/page/5/index.html @@ -1 +1 @@ - Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
    \ No newline at end of file + Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
    MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/50/index.html b/releases/page/50/index.html
index 3badc11889..f551a26096 100644
--- a/releases/page/50/index.html
+++ b/releases/page/50/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/51/index.html b/releases/page/51/index.html
index 7f21db4c0b..1b8f161092 100644
--- a/releases/page/51/index.html
+++ b/releases/page/51/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/52/index.html b/releases/page/52/index.html
index c702162311..3f4e1772cc 100644
--- a/releases/page/52/index.html
+++ b/releases/page/52/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/53/index.html b/releases/page/53/index.html
index 673778b9f9..86600be9bf 100644
--- a/releases/page/53/index.html
+++ b/releases/page/53/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/54/index.html b/releases/page/54/index.html
index e4e76d5795..6201f4e99c 100644
--- a/releases/page/54/index.html
+++ b/releases/page/54/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/6/index.html b/releases/page/6/index.html
index 2b9d934b24..8501a915df 100644
--- a/releases/page/6/index.html
+++ b/releases/page/6/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/7/index.html b/releases/page/7/index.html
index 6a69b72166..094ca35a2d 100644
--- a/releases/page/7/index.html
+++ b/releases/page/7/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/8/index.html b/releases/page/8/index.html
index e819c256b9..8eeaa7a70d 100644
--- a/releases/page/8/index.html
+++ b/releases/page/8/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/releases/page/9/index.html b/releases/page/9/index.html
index 8dfa5621f3..abfa84228f 100644
--- a/releases/page/9/index.html
+++ b/releases/page/9/index.html
@@ -1 +1 @@
- Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-18
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
+ Debezium Releases Overview

    Debezium Releases Overview

    Series

    latest stable

    3.0

    2024-12-19
    Incubating implementation of MongoDB sink connector; Switch to Java 17/21

    stable

    2.7

    2024-12-11
MariaDB connector is split from MySQL; MongoDB supports additional conditions in incremental snapshot; Vitess connecto...

    Tested Versions

    3.0 2.7
    Java 17+ for connectors; 21+ for Debezium Server, Operator and Outbox extension
    11+
    Kafka Connect 3.1 and later
    2.x, 3.x
    MySQL Database: 8.0.x, 8.4.x, 9.0, 9.1
    Driver: 9.1.0
    Database: 8.0.x, 8.2
    Driver: 8.3.0
    MariaDB Database: 11.4.x
    Driver: 3.2.0
    Database: 11.4.3
    Driver: 3.2.0
    MongoDB Database: 6.0, 7.0, 8.0
    Driver: 4.11.0
    Database: 5.0, 6.0, 7.0
    Driver: 4.11.0
    PostgreSQL Database: 12, 13, 14, 15, 16, 17
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Database: 12, 13, 14, 15, 16
    Plug-ins: decoderbufs, pgoutput
    Driver: 42.6.1
    Oracle Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    Database: 12c, 19c, 21c
    Driver: 12.2.x, 19.x, 21.x
    OpenLogReplicator: 1.3.0
    SQL Server Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Database: 2017, 2019, 2022
    Driver: 12.4.2.jre8
    Cassandra 3 Database: 3.11.12
    Driver: 3.11.12
    Database: 3.11.12
    Driver: 3.11.12
    Cassandra 4 Database: 4.0.2
    Driver: 4.14.0
    Database: 4.0.2
    Driver: 4.14.0
    Cassandra 5 Database: 5.0.2
    Driver: 4.14.0
    Db2 Database: 11.5
    Driver: 11.5.0.0
    Database: 11.5
    Driver: 11.5.0.0
    Spanner Database: 6.x
    Driver: 6.x
    Database: 6.x
    Driver: 6.x
    Vitess Database: 12.0.x
    Driver: 17.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    Database: 12.0.x
    Driver: 12.0.0*
    * See the Vitess Connector documentation for limitations when using the connector with earlier Vitess versions
    JDBC sink Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Databases: Db2, MySQL, Oracle, PostgreSQL, SQL Server
    Informix Database: 12, 14
    Driver: 4.50.11
    Database: 12, 14
    Driver: 4.50.10
    Db2 for IBM i Database: 7.4
    Driver: 11.1
    Database: 7.4
    Driver: 11.1
    The Debezium connectors have been tested with the versions of Java, Apache Kafka (Connect), and databases listed above. Testing is done using Linux containers. Specific Debezium connectors might also be compatible with other database versions not listed here.
\ No newline at end of file
diff --git a/roadmap/index.html b/roadmap/index.html
index a0a4cabe64..19ed649e48 100644
--- a/roadmap/index.html
+++ b/roadmap/index.html
@@ -1 +1 @@
- Debezium Roadmap

    Debezium Roadmap

    This page describes the roadmap for upcoming work on Debezium.

Debezium is community-driven, and as such the roadmap constantly evolves to reflect users’ needs and contributions. You can find a fine-grained list of planned issues and feature requests in our issue tracker, but this page is a good starting point to see where we are going.

This roadmap is subject to change. Please get in touch if you think anything important is missing from it.

The Debezium community pursues a time-boxed release scheme: minor releases (1.2, 1.3, etc.) are done at the end of every quarter, with preview releases (1.3.0.Alpha1, 1.3.0.Beta1, etc.) every three weeks. Because our engineering capacity is limited, we focus our efforts on a single release line at a time (e.g. 1.3); patch releases for the current stable minor release (e.g. 1.2.1.Final) are done only for critical bug fixes.

    2.7 - June 2024

    • Official MariaDB connector

    • Db2 z/OS source connector

    • Introduce an SPI to reduce memory footprint for multi-tenant databases with identical schemas

    • R&D for read-only incremental snapshots for other relational connectors

    • R&D to determine feasibility of using SQL Server transaction logs rather than capture instances

• Design/Implement user-friendly offset manipulation (i.e., start at a specific position in the transaction logs)

    • Streaming from MongoDB collections

    • Sink connector for MongoDB

    • Additional monitoring - Quantimeter

    • Expand Oracle 23c support (pending Oracle’s EE release of 23c)

    • New Debezium UI R&D for integration with Debezium Server and Operator

    • Debezium Operator promoted to stable

    3.0 - September 2024

    • Java 17 baseline

    • Kafka 3.1 minimum baseline support

    • Source connector for InfluxDB time series database

• New off-heap Oracle transaction buffer implementations; more details soon.

    • Exactly-Once support for other connectors

    • Remove deprecated code

    • Implement read-only incremental snapshots for relational connectors

    • And much more…​

    3.1 - December 2024

    • Oracle 12c support sunset (best-effort moving forward).

    • Create PoC for implementing sagas (long-running business transactions spanning multiple (micro-)services) using CDC and the outbox pattern

    • Explore and provide building blocks for implementing CQRS architectures based on change data streams

    Future Releases

    • Add schema history compaction tool

    • Provide a Debezium-specific SPI for describing change event schemas

• API/SPI that allows implementing custom connectors on the foundations of Debezium

• Exploration of creating aggregated events, based on the streams/data from multiple tables, so as to enable use cases that need to incorporate data from multiple tables into a single output structure (e.g. an Elasticsearch document containing order and orderline info)

• Support for Infinispan as a source

• Allow propagating custom contextual data in change messages (e.g. the business user making a certain change)

• Provide more detailed monitoring information about Debezium’s internal state and health via JMX (e.g. to spot loss of a DB connection while the connector is still running)

    • Explore publication of change events via reactive data streams (on top of embedded connector)

    • New implementation of the Debezium embedded engine independent of Kafka Connect APIs

    • Debezium UI (Legacy) - New CLI tooling

      • Incremental Snapshot interface

      • Quick-start connector deployment

      • Support varied Kafka Connect and Debezium versions

      • Offset manipulation

    • Debezium Operator

      • Improved Observability and Metrics

      • R&D to consider multitasking support with Debezium Server

      • Integration with Distribution Builder

    Past Releases

    Please see the releases overview page to learn more about the contents of past Debezium releases.

\ No newline at end of file
+ Debezium Roadmap

    Debezium Roadmap

    This page describes the roadmap for upcoming work on Debezium.

Debezium is community-driven, and as such the roadmap constantly evolves to reflect users’ needs and contributions. You can find a fine-grained list of planned issues and feature requests in our issue tracker, but this page is a good starting point to see where we are going.

This roadmap is subject to change. Please get in touch if you think anything important is missing from it.

The Debezium community pursues a time-boxed release scheme: minor releases (1.2, 1.3, etc.) are done at the end of every quarter, with preview releases (1.3.0.Alpha1, 1.3.0.Beta1, etc.) every three weeks. Because our engineering capacity is limited, we focus our efforts on a single release line at a time (e.g. 1.3); patch releases for the current stable minor release (e.g. 1.2.1.Final) are done only for critical bug fixes.

    2.7 - June 2024

    • Official MariaDB connector

    • Db2 z/OS source connector

    • Introduce an SPI to reduce memory footprint for multi-tenant databases with identical schemas

    • R&D for read-only incremental snapshots for other relational connectors

    • R&D to determine feasibility of using SQL Server transaction logs rather than capture instances

• Design/Implement user-friendly offset manipulation (i.e., start at a specific position in the transaction logs)

    • Streaming from MongoDB collections

    • Sink connector for MongoDB

    • Additional monitoring - Quantimeter

    • Expand Oracle 23c support (pending Oracle’s EE release of 23c)

    • New Debezium UI R&D for integration with Debezium Server and Operator

    • Debezium Operator promoted to stable

    3.0 - September 2024

    • Java 17 baseline

    • Kafka 3.1 minimum baseline support

    • Source connector for InfluxDB time series database

• New off-heap Oracle transaction buffer implementations; more details soon.

    • Exactly-Once support for other connectors

    • Remove deprecated code

    • Implement read-only incremental snapshots for relational connectors

    • And much more…​

    3.1 - December 2024

    • Oracle 12c support sunset (best-effort moving forward).

    • Create PoC for implementing sagas (long-running business transactions spanning multiple (micro-)services) using CDC and the outbox pattern

    • Explore and provide building blocks for implementing CQRS architectures based on change data streams

    Future Releases

    • Add schema history compaction tool

    • Provide a Debezium-specific SPI for describing change event schemas

• API/SPI that allows implementing custom connectors on the foundations of Debezium

• Exploration of creating aggregated events, based on the streams/data from multiple tables, so as to enable use cases that need to incorporate data from multiple tables into a single output structure (e.g. an Elasticsearch document containing order and orderline info)

• Support for Infinispan as a source

• Allow propagating custom contextual data in change messages (e.g. the business user making a certain change)

• Provide more detailed monitoring information about Debezium’s internal state and health via JMX (e.g. to spot loss of a DB connection while the connector is still running)

    • Explore publication of change events via reactive data streams (on top of embedded connector)

    • New implementation of the Debezium embedded engine independent of Kafka Connect APIs

    • Debezium UI (Legacy) - New CLI tooling

      • Incremental Snapshot interface

      • Quick-start connector deployment

      • Support varied Kafka Connect and Debezium versions

      • Offset manipulation

    • Debezium Operator

      • Improved Observability and Metrics

      • R&D to consider multitasking support with Debezium Server

      • Integration with Distribution Builder

    Past Releases

    Please see the releases overview page to learn more about the contents of past Debezium releases.

    \ No newline at end of file diff --git a/tag/analytics/index.html b/tag/analytics/index.html index a6fdf1f4d9..3eae8de8f9 100644 --- a/tag/analytics/index.html +++ b/tag/analytics/index.html @@ -1 +1 @@ - Tag: analytics

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    \ No newline at end of file diff --git a/tag/announcement discussion survey/index.html b/tag/announcement discussion survey/index.html index fe668bf7bc..f3ddb32a87 100644 --- a/tag/announcement discussion survey/index.html +++ b/tag/announcement discussion survey/index.html @@ -1 +1 @@ - Tag: announcement discussion survey

    Debezium Blog

    The Debezium project is conducting our 2024 Community Feedback survey, and we want to hear from YOU!

    \ No newline at end of file diff --git a/tag/announcement/index.html b/tag/announcement/index.html index efc1f398fa..6333e4a633 100644 --- a/tag/announcement/index.html +++ b/tag/announcement/index.html @@ -1 +1 @@ - Tag: announcement

    Debezium Blog

    After consulting with the community both inside and outside of Red Hat, we have made the decision to submit a request for the Debezium project to join the Commonhaus Foundation.

    tl;dr

    We are considering moving Debezium to a Software Foundation to expand our community, become more open and transparent in our roadmap and decisions, and encourage multi-vendor participation and execution.

    \ No newline at end of file diff --git a/tag/apache kafka/index.html b/tag/apache kafka/index.html index c9b42f171f..42941db3bf 100644 --- a/tag/apache kafka/index.html +++ b/tag/apache kafka/index.html @@ -1 +1 @@ - Tag: apache kafka

    Debezium Blog

Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. Not only does this simplify running Kafka from an operational perspective, but the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) should also provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

    \ No newline at end of file diff --git a/tag/apache-kafka/index.html b/tag/apache-kafka/index.html index ab3d0082ba..fd36db44b6 100644 --- a/tag/apache-kafka/index.html +++ b/tag/apache-kafka/index.html @@ -1 +1 @@ - Tag: apache-kafka

    Debezium Blog

Debezium has provided a way to run connectors directly within an application from the very beginning of the project. How this capability is provided has changed over time and it still evolves. This article describes another evolutionary step in this regard - a new implementation of the Debezium engine.
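
To make the discussion concrete, here is a minimal sketch of hosting a connector through the public io.debezium.engine.DebeziumEngine API that this work evolves; the connector choice and property values are illustrative assumptions, not taken from the article:

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class EmbeddedEngineSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "engine-sketch");
        // Illustrative connector and connection settings - adjust to your database.
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("topic.prefix", "demo");
        // The embedded engine tracks offsets itself, here in a local file.
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");

        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Each captured change event is handed to the consumer passed to notifying().
        try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value()))
                .build()) {
            executor.execute(engine);
            TimeUnit.MINUTES.sleep(1); // capture changes for a minute, then close the engine
        }
        executor.shutdown();
    }
}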

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will move one step further - we will use Debezium to create multiple data streams from the database, using one of the streams for continuous learning to improve our model and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event. However, in case of failures, restarts or DB connection drops, the same event can be delivered more than once. A typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered and at the same time there won’t be any duplicates; every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out-of-the-box, with only a little configuration change.
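
As a rough sketch of that configuration change (assuming Kafka Connect's KIP-618 exactly-once source support; property names may vary by Kafka and Debezium version), the relevant worker and connector settings look roughly like this, expressed here as Java properties:

import java.util.Properties;

public class ExactlyOnceConfigSketch {
    public static void main(String[] args) {
        // Kafka Connect worker-level setting (normally placed in the worker's properties
        // file): enables the exactly-once machinery for source connectors on this cluster.
        Properties worker = new Properties();
        worker.setProperty("exactly.once.source.support", "enabled");

        // Connector-level settings for a Debezium source connector: ask Connect to
        // require exactly-once delivery and commit transactions per poll cycle.
        Properties connector = new Properties();
        connector.setProperty("exactly.once.support", "required");
        connector.setProperty("transaction.boundary", "poll");

        worker.forEach((key, value) -> System.out.println(key + "=" + value));
        connector.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}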

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models are even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

In the above-mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

• Use case that was behind the actual change, e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly on the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.
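
A condensed Kafka Streams sketch of that enrichment step, joining change events with a topic of transaction-scoped metadata by transaction id; the topic names and the key-extraction placeholder are assumptions for illustration, not the exact code from the post:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

import java.util.Properties;

public class AuditLogEnricherSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // Transaction metadata (who/why/when), keyed by transaction id, written by the application.
        KTable<String, String> txContext = builder.table("transaction-context",
                Consumed.with(Serdes.String(), Serdes.String()));

        // Debezium change events, re-keyed by transaction id and joined with the metadata.
        builder.stream("dbserver1.inventory.vegetable", Consumed.with(Serdes.String(), Serdes.String()))
                .selectKey((key, value) -> extractTransactionId(value))
                .join(txContext, (change, context) -> change + ", context=" + context)
                .to("vegetable-audit-log", Produced.with(Serdes.String(), Serdes.String()));

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "audit-log-enricher");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        new KafkaStreams(builder.build(), props).start();
    }

    private static String extractTransactionId(String changeEventJson) {
        // Placeholder: a real implementation would parse the transaction id from the
        // change event's source/transaction block instead of returning the raw event.
        return changeEventJson;
    }
}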

Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from best-of-breed Java libraries and standards, it allows you to build Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.
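
The essence of the pattern can be sketched with plain JDBC - the service's own state change and the outbox record are written in one and the same database transaction, and Debezium later relays the outbox insert from the transaction log (table and column names below are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.UUID;

public class OutboxSketch {

    public static void placeOrder(String orderPayloadJson) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/orderdb", "app", "secret")) {
            conn.setAutoCommit(false);

            // 1. Apply the service's own state change.
            try (PreparedStatement order = conn.prepareStatement(
                    "INSERT INTO purchase_order (id, payload) VALUES (?, ?::jsonb)")) {
                UUID orderId = UUID.randomUUID();
                order.setObject(1, orderId);
                order.setString(2, orderPayloadJson);
                order.executeUpdate();

                // 2. Record the event to publish, in the very same transaction.
                try (PreparedStatement outbox = conn.prepareStatement(
                        "INSERT INTO outbox (id, aggregatetype, aggregateid, type, payload) "
                                + "VALUES (?, ?, ?, ?, ?::jsonb)")) {
                    outbox.setObject(1, UUID.randomUUID());
                    outbox.setString(2, "Order");
                    outbox.setString(3, orderId.toString());
                    outbox.setString(4, "OrderCreated");
                    outbox.setString(5, orderPayloadJson);
                    outbox.executeUpdate();
                }
            }

            // Either both inserts become visible to Debezium, or neither does.
            conn.commit();
        }
    }
}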

    \ No newline at end of file diff --git a/tag/apicurio/index.html b/tag/apicurio/index.html index 15f85ceb56..6d92afa1e4 100644 --- a/tag/apicurio/index.html +++ b/tag/apicurio/index.html @@ -1 +1 @@ - Tag: apicurio

    Debezium Blog

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    \ No newline at end of file diff --git a/tag/avro/index.html b/tag/avro/index.html index a85543a893..42211b7aae 100644 --- a/tag/avro/index.html +++ b/tag/avro/index.html @@ -1 +1 @@ - Tag: avro

    Debezium Blog

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

    Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.
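
For example, switching Kafka Connect to the Avro converter typically boils down to converter settings along the following lines (shown as Java properties for brevity; the Confluent converter class and registry URL are assumptions and would differ when using Apicurio or another registry):

import java.util.Properties;

public class AvroConverterConfigSketch {
    public static void main(String[] args) {
        Properties workerConfig = new Properties();
        // Serialize Debezium change event keys and values as Avro instead of JSON.
        workerConfig.setProperty("key.converter", "io.confluent.connect.avro.AvroConverter");
        workerConfig.setProperty("value.converter", "io.confluent.connect.avro.AvroConverter");
        // The converter registers and resolves schemas through a schema registry.
        workerConfig.setProperty("key.converter.schema.registry.url", "http://schema-registry:8081");
        workerConfig.setProperty("value.converter.schema.registry.url", "http://schema-registry:8081");

        workerConfig.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}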

    \ No newline at end of file diff --git a/tag/aws/index.html b/tag/aws/index.html index e91606cc0d..f243046005 100644 --- a/tag/aws/index.html +++ b/tag/aws/index.html @@ -1 +1 @@ - Tag: aws

    Debezium Blog

In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power many different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data to a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that any future users can avoid them, discuss one of the more challenging production incidents we faced, and how Debezium helped ensure we could recover without any data loss. In closing, we discuss what value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    \ No newline at end of file diff --git a/tag/batch/index.html b/tag/batch/index.html index 6fda61aa38..86b466575e 100644 --- a/tag/batch/index.html +++ b/tag/batch/index.html @@ -1 +1 @@ - Tag: batch

    Debezium Blog

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.
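
As a hedged illustration of what enabling batches involves (property names such as batch.size reflect the JDBC sink connector's batch support as we understand it and may differ by version), the sink configuration is tuned roughly like this:

import java.util.Properties;

public class JdbcSinkBatchConfigSketch {
    public static void main(String[] args) {
        Properties sink = new Properties();
        sink.setProperty("connector.class", "io.debezium.connector.jdbc.JdbcSinkConnector");
        sink.setProperty("connection.url", "jdbc:postgresql://localhost:5432/target");
        // How many records the sink groups into one batched write.
        sink.setProperty("batch.size", "500");
        // Let the Connect consumer hand over batches of a matching size.
        sink.setProperty("consumer.override.max.poll.records", "500");

        sink.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}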

    \ No newline at end of file diff --git a/tag/caassandra/index.html b/tag/caassandra/index.html index 7412ced409..8749509d07 100644 --- a/tag/caassandra/index.html +++ b/tag/caassandra/index.html @@ -1 +1 @@ - Tag: caassandra

    Debezium Blog

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    \ No newline at end of file diff --git a/tag/camel/index.html b/tag/camel/index.html index 779771bf0a..2b6b4680e6 100644 --- a/tag/camel/index.html +++ b/tag/camel/index.html @@ -1 +1 @@ - Tag: camel

    Debezium Blog

One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

    • Use Debezium Embedded engine in a Java standalone application and write the integration code using plain Java; that’s often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub etc.

    • Use an existing integration framework or service bus to express the pipeline logic

This article focuses on the third option - a dedicated integration framework.
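
As a small sketch of that third option (using Apache Camel's Debezium component; the endpoint options and target endpoint are illustrative and depend on the Camel and Debezium versions in use):

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.main.Main;

public class CamelDebeziumSketch {
    public static void main(String[] args) throws Exception {
        Main main = new Main();
        main.configure().addRoutesBuilder(new RouteBuilder() {
            @Override
            public void configure() {
                // Capture MySQL changes with the Camel Debezium component and forward
                // each change event to another system, here an Amazon Kinesis stream.
                from("debezium-mysql:customers"
                        + "?databaseHostname=localhost"
                        + "&databaseUser=debezium"
                        + "&databasePassword=dbz"
                        + "&offsetStorageFileName=/tmp/offsets.dat")
                    .to("aws2-kinesis://customer-changes");
            }
        });
        main.run();
    }
}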

    \ No newline at end of file diff --git a/tag/cassandra/index.html b/tag/cassandra/index.html index 179262449c..87600efacb 100644 --- a/tag/cassandra/index.html +++ b/tag/cassandra/index.html @@ -1 +1 @@ - Tag: cassandra

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

Although the first half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us and the summer spirit setting in, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes to address regressions, stability, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes to address regressions, stability, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes New features Other changes Breaking changes MongoDB The MongoDB connector explicitly...

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all of these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!
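    To give a feel for the Postgres logical decoding message support mentioned above: applications can write arbitrary, table-less messages into the WAL with PostgreSQL’s built-in pg_logical_emit_message() function, and Debezium can surface them as change events. The snippet below is only a minimal sketch using plain JDBC; the connection details, prefix, and payload are placeholder assumptions, not values taken from the release itself.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class EmitLogicalMessage {
        public static void main(String[] args) throws Exception {
            // Placeholder connection settings; adjust for your environment.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                 // pg_logical_emit_message(transactional, prefix, content) writes a message
                 // into the WAL without touching any table.
                 PreparedStatement stmt = conn.prepareStatement(
                         "SELECT pg_logical_emit_message(true, ?, ?)")) {
                stmt.setString(1, "order-events");            // prefix, useful for filtering consumers
                stmt.setString(2, "{\"orderId\": 1001}");     // arbitrary payload
                stmt.execute();
            }
        }
    }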

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Even so, it includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no fewer than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries (see the sketch below)

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.
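    To illustrate the heartbeat action query support noted in the list above: the connector can periodically run a user-supplied statement against the source database so that its position keeps advancing even when the captured tables are quiet. The fragment below is a minimal, hedged sketch; the heartbeat.interval.ms and heartbeat.action.query property names follow the documented heartbeat options, while the table name and interval are illustrative assumptions.

    import java.util.Properties;

    public class HeartbeatConfigSketch {

        // Connector configuration fragment showing the heartbeat options
        // (the debezium_heartbeat table and the 10s interval are placeholders).
        static Properties heartbeatProps() {
            Properties props = new Properties();
            props.setProperty("heartbeat.interval.ms", "10000");
            // Executed on each heartbeat so the binlog position moves forward
            // even on low-traffic databases.
            props.setProperty("heartbeat.action.query",
                    "INSERT INTO debezium_heartbeat (id, ts) VALUES (1, NOW()) "
                    + "ON DUPLICATE KEY UPDATE ts = NOW()");
            return props;
        }
    }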

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 come quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Some exciting things also happened in the wider Debezium community over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped-operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features below.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.
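    Incremental snapshots are typically triggered by inserting a signal row into a table that the connector watches (configured via the signal.data.collection property). The following is only a rough sketch under assumed names; the debezium_signal table, the connection settings, and the captured table are placeholders, and the exact signal payload should be checked against the documentation for your version.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class SendSnapshotSignal {
        public static void main(String[] args) throws Exception {
            // Placeholder connection; the signal table must match signal.data.collection.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                 PreparedStatement stmt = conn.prepareStatement(
                         "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                stmt.setString(1, "ad-hoc-1");                 // arbitrary, unique signal id
                stmt.setString(2, "execute-snapshot");         // request an incremental snapshot
                stmt.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
                stmt.executeUpdate();
            }
        }
    }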

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features below.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for emitting snapshot records as create or read operations

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, no fewer than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, no fewer than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructure like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode, "consistent hashing", which allows anonymizing column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!
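    As a hedged illustration of the new content-based routing transformation mentioned above: the SMT evaluates a scripting expression against each change event and re-routes the event to whatever topic the expression returns. The fragment below is a sketch only; the expression, topic names, and the choice of Groovy are assumptions, and the required scripting dependencies must be on the connector’s classpath.

    import java.util.Properties;

    public class ContentBasedRoutingSketch {

        // Connector configuration fragment for the content-based router SMT
        // (expression and topic names are illustrative).
        static Properties routingProps() {
            Properties props = new Properties();
            props.setProperty("transforms", "route");
            props.setProperty("transforms.route.type", "io.debezium.transforms.ContentBasedRouter");
            props.setProperty("transforms.route.language", "jsr223.groovy");
            // Send update events to a dedicated topic; returning null keeps the original topic.
            props.setProperty("transforms.route.topic.expression",
                    "value.op == 'u' ? 'dbserver1.inventory.orders.updates' : null");
            return props;
        }
    }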

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API (see the sketch after this list)

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed no fewer than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
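    To make the embedded engine items in the list above more concrete, here is a minimal sketch of running the engine with an SMT and the new consistent-hash column masking mode applied in-process. It is based on the io.debezium.engine API; the connector choice, connection values, salt, and column names are placeholder assumptions rather than settings prescribed by the release.

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class EmbeddedEngineWithSmt {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("name", "engine");
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
            props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
            props.setProperty("offset.flush.interval.ms", "60000");
            // Placeholder connection settings.
            props.setProperty("database.hostname", "localhost");
            props.setProperty("database.port", "5432");
            props.setProperty("database.user", "postgres");
            props.setProperty("database.password", "postgres");
            props.setProperty("database.dbname", "inventory");
            props.setProperty("database.server.name", "dbserver1");
            // SMT applied inside the engine: unwrap the envelope and add metadata fields.
            props.setProperty("transforms", "unwrap");
            props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
            props.setProperty("transforms.unwrap.add.fields", "op,source.ts_ms");
            // Consistent-hash column masking (salt and column are illustrative).
            props.setProperty("column.mask.hash.SHA-256.with.salt.CzQMA0cB5K", "inventory.customers.email");

            try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(record -> System.out.println(record.value()))
                    .build()) {
                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);
                // Let the engine run for a while before the try-with-resources closes it.
                TimeUnit.MINUTES.sleep(1);
                executor.shutdown();
            }
        }
    }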

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as not only did a good number of bug fixes come in, but the community also provided a few very useful feature implementations which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    This post originally appeared on the WePay Engineering blog.

    In the first half of this blog post series, we explained our decision-making process of designing a streaming data pipeline for Cassandra at WePay. In this post, we will break down the pipeline into three sections and discuss each of them in more detail:

    1. Cassandra to Kafka with CDC agent

    2. Kafka to BigQuery with KCBQ

    3. Transformation with BigQuery view

    This post originally appeared on the WePay Engineering blog.

    Historically, MySQL had been the de-facto database of choice for microservices at WePay. As WePay scales, the sheer volume of data written into some of our microservice databases required us to make a scaling decision between sharded MySQL (i.e. Vitess) and switching to a natively sharded NoSQL database. After a series of evaluations, we picked Cassandra, a NoSQL database, primarily because of its high availability, horizontal scalability, and ability to handle high write throughput.

    Tag: cassandra

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues with the 2.6.1.Final release, and includes support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in depth…​

    As the summer temperature continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of changes, from new improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, an issue where MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    With spring upon us and the summer spirit ahead, the team has sprung forward into action, and we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand-new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable…​

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, including batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth…​

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cycle took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server, to name just a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users on earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So lets take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Lets take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options and as well as the legacy MongoDB oplog implementation.

    Lets take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Lets take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release, due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 come quite a number of improvements, most notably the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Some exciting things also happened in the wider Debezium community over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed full of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

This release adds optimizations for skipped operations in SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.
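To make the incremental snapshot workflow a bit more concrete, here is a minimal, hedged sketch of how an ad-hoc snapshot is typically requested: a signal row is written into the connector’s signaling table, which the connector then picks up from its change stream. The signaling table name, connection details, and the captured collection below are assumptions chosen for the example; the exact signal format for your connector and version is described in the Debezium documentation.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Hedged sketch: request an ad-hoc incremental snapshot of a single table by
// inserting an "execute-snapshot" signal into the connector's signaling table.
// The signaling table name, JDBC URL, credentials and the captured collection
// are illustrative assumptions, not values taken from the announcement.
public class TriggerIncrementalSnapshot {

    public static void main(String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:sqlserver://localhost:1433;databaseName=testDB", "debezium", "dbz");
             PreparedStatement signal = connection.prepareStatement(
                 "INSERT INTO dbo.debezium_signal (id, type, data) VALUES (?, ?, ?)")) {

            signal.setString(1, "ad-hoc-1");                 // arbitrary unique signal id
            signal.setString(2, "execute-snapshot");         // signal type
            signal.setString(3, "{\"data-collections\": [\"testDB.dbo.customers\"]}");
            signal.executeUpdate();                          // connector snapshots the listed table
        }
    }
}
```

The snapshot of the listed table then runs alongside regular streaming, using the watermarking approach described in the DBLog paper mentioned above.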

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

• MySQL support for emitting snapshot records as create or read operations

• Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

• New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering

• Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which can propagate data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", which anonymizes column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
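As a hedged illustration of the embedded engine and SMT support mentioned above, the sketch below runs a connector in-process and applies the scripting-based filter transformation to the event stream. The connector choice, connection properties, and filter condition are assumptions made for the example (and the Groovy-based filter additionally requires the scripting dependencies on the classpath), so treat it as a starting point rather than a definitive configuration.

```java
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hedged sketch: run Debezium in-process via the embedded engine API and apply
// the scripting-based Filter SMT to drop all events except updates. Connector,
// connection settings and the filter condition are illustrative assumptions.
public class EmbeddedEngineWithSmt {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("name", "embedded-engine");
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("database.server.name", "dbserver1");
        // SMTs are configured with the same property names as in Kafka Connect
        props.setProperty("transforms", "filter");
        props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
        props.setProperty("transforms.filter.language", "jsr223.groovy");
        props.setProperty("transforms.filter.condition", "value.op == 'u'"); // keep updates only

        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value()))
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine); // runs until engine.close() is called
    }
}
```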

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case shortly.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

• Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

• Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

This release finalizes the work of eight preview releases overall. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    This post originally appeared on the WePay Engineering blog.

    In the first half of this blog post series, we explained our decision-making process of designing a streaming data pipeline for Cassandra at WePay. In this post, we will break down the pipeline into three sections and discuss each of them in more detail:

    1. Cassandra to Kafka with CDC agent

2. Kafka to BigQuery with KCBQ

    3. Transformation with BigQuery view

    This post originally appeared on the WePay Engineering blog.

Historically, MySQL had been the de-facto database of choice for microservices at WePay. As WePay scales, the sheer volume of data written into some of our microservice databases required us to make a scaling decision between sharded MySQL (i.e. Vitess) and switching to a natively sharded NoSQL database. After a series of evaluations, we picked Cassandra, a NoSQL database, primarily because of its high availability, horizontal scalability, and ability to handle high write throughput.

    \ No newline at end of file diff --git a/tag/cdc/index.html b/tag/cdc/index.html index 3250f0a211..352332b8e2 100644 --- a/tag/cdc/index.html +++ b/tag/cdc/index.html @@ -1 +1 @@ - Tag: cdc

    Debezium Blog

    In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database resulting in the creation of DDD Aggregates via Debezium & Kafka-Streams.

    \ No newline at end of file + Tag: cdc

    Debezium Blog

    In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database resulting in the creation of DDD Aggregates via Debezium & Kafka-Streams.

    \ No newline at end of file diff --git a/tag/channels/index.html b/tag/channels/index.html index 3943dc729a..59305f7ccb 100644 --- a/tag/channels/index.html +++ b/tag/channels/index.html @@ -1 +1 @@ - Tag: channels

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    \ No newline at end of file + Tag: channels

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    \ No newline at end of file diff --git a/tag/community-stories/index.html b/tag/community-stories/index.html index 5e837fe774..6f0e680916 100644 --- a/tag/community-stories/index.html +++ b/tag/community-stories/index.html @@ -1 +1 @@ - Tag: community-stories

    Debezium Blog

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    \ No newline at end of file + Tag: community-stories

    Debezium Blog

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    \ No newline at end of file diff --git a/tag/community/index.html b/tag/community/index.html index 4a2c2d2a6a..5dd5d3fd7e 100644 --- a/tag/community/index.html +++ b/tag/community/index.html @@ -1 +1 @@ - Tag: community

    Debezium Blog

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

    Hi everyone, my name is Mario Fiore Vitale and I recently joined Red Hat and the Debezium team.

I am a very curious person who follows a continuous learning approach, and I like to keep growing my skills. I care about code quality and readability.

I have 9+ years of experience and have worked for consultancy, startup, and enterprise product companies in different sectors. In my previous experience I had the chance to work on an architecture re-design project to split a monolith into a microservices application. During this time I gained experience with different technologies such as Kafka, Elasticsearch, Redis, Kubernetes, VictoriaMetrics, Spring Framework, and a bit of Cassandra.

    Why Am I here?

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

    As you are probably well aware, Gunnar Morling has stepped down from his position as Debezium project lead and is now pursuing new exciting adventures. It is sad, but every cloud has a silver lining!

    What can it be? We (the Debezium team and Red Hat) are hiring! Are you a community contributor? Do you have any pull requests under your belt? Are you a happy Debezium user and eager to do more, or are you a seasoned Java developer looking for work in an exciting and inclusive open-source environment?

    Some time in early 2017, I got a meeting invite from Debezium’s founder, Randall Hauch. He was about to begin a new chapter in his professional career and was looking for someone to take over as the project lead for Debezium. So we hopped on a call to talk things through, and I was immediately sold on the concept of change data capture, its large number of potential use cases and applications, and the idea of making this available to the community as open-source. After some short consideration I decided to take up this opportunity, and without a doubt this has been one of the best decisions I’ve ever made in my job.

When developing the tests for your project, sooner or later you will probably get into the situation where some of the tests fail randomly. These tests, also known as flaky tests, are very unpleasant as you never know whether the failure was random or there is a regression in your code. In the worst case you just ignore these tests because you know they are flaky. Most testing frameworks even have a dedicated annotation or other means to express that a test is flaky and that its failures should be ignored. The value of such a test is very questionable. The best thing you can do with such a test is of course to fix it so that it doesn’t fail randomly. That’s easy to say, but harder to do. The hardest part is usually to make the test fail in your development environment so that you can debug it and understand why it fails and what the root cause of the failure is. In this blog post I’ll try to show a few techniques which may help you to simulate random test failures on your local machine.

As you probably noticed, we have started work on Debezium 2.0. One of the planned changes for the 2.0 release is to switch to Java 11 as a baseline. While some Java build providers still support Java 8, other Java 8 distributions have already reached their end of life/support. Users are moving to Java 11 anyway, as surveys like New Relic’s State of the Java Ecosystem Report indicate. But it is not only a matter of support: Java 11 comes with various performance improvements, useful tools like JDK Flight Recorder, which was open-sourced in Java 11, and more. So we felt it was about time to start thinking about using a more recent JDK as the baseline for Debezium, and the new major release is a natural milestone at which to make the switch.

    Hi everyone, my name is Vojtěch Juránek and I recently joined the Debezium team.

Most of my professional IT career I’ve spent at Red Hat. I have a background in particle physics, but I did quite a lot of programming even before joining Red Hat, when working on simulations of high-energy particle collisions and their data analysis. The science is open by default and all the software I was using was open source as well. That is where my love for open source started.

    At ScyllaDB, we develop a high-performance NoSQL database Scylla, API-compatible with Apache Cassandra, Amazon DynamoDB and Redis. Earlier this year, we introduced support for Change Data Capture in Scylla 4.3. This new feature seemed like a perfect match for integration with the Apache Kafka ecosystem, so we developed the Scylla CDC Source Connector using the Debezium framework. In this blogpost we will cover the basic structure of Scylla’s CDC, reasons we chose the Debezium framework and design decisions we made.

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    Welcome to the newest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    It’s been a long time since our last edition. But we are back again! In case you missed our last edition, you can check it out here.

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

    I started my journey with Red Hat in April 2020 after completing my graduation. I was introduced to open source in my early college days, but I wasn’t aware of how organizations work and wanted to get the essence of open source ethics and values. That is something that I am fascinated to learn as I joined Red Hat.

    My work started under the Data Virtualization team with Teiid and then under the GRAPHQLCRUD project which is a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

    Coming to Debezium, I first heard about it as some DV members started contributing here, well back then it was a completely new thing for me. I started exploring more, and it was not long when I had my first interaction with Gunnar and Jiri. With a warm welcome and great team here, I am really excited to work with the Debezium Community.

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

    When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing to safely operate CDC pipelines also in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 for capturing clickstreams in the offshore datacenters into Kafka and aggregating them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

    In 2014 I did my first OSS contributions to Composer, PHP’s dependency management and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I did my first contributions to Debezium with work on the MySQL snapshot process and fixing a MySQL TIME data type issue.

    In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurances, industrial sector, media). I was doing lots of networking at that time, where I learned how awesome the community around Kafka is. I was always quite sad I didn’t have more time to focus on OSS projects.

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    Welcome to the Debezium community newsletter in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.

    Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    Hello everyone, my name is Chris Cranford and I recently joined the Debezium team.

My journey at Red Hat began just over three years ago; however, I have been in this line of work for nearly twenty years. All throughout my career, I have advocated and supported open source software. Many of my initial software endeavors were based on open source software, several of which are still heavily used today, such as Hibernate ORM.

    When I first learned about the Debezium project last year, I was very excited about it right away.

    I could see how this project would be very useful for many people out there and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, the overall idea of creating a diverse eco-system of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming web site and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve previously been working on and also tie in with ideas I’ve been pursuing around CQRS, event sourcing and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM - a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play in using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture can be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each was initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and wrote blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who’s recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and will be an excellent lead for the Debezium community.

    \ No newline at end of file + Tag: community

    Debezium Blog

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today, it’s my pleasure to talk to Lars M Johansson.

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

    Hi everyone, my name is Mario Fiore Vitale and I recently joined Red Hat and the Debezium team.

I am a very curious person who follows a continuous learning approach, and I like to keep growing my skills. I care about code quality and readability.

I have 9+ years of experience and have worked for consultancy, startup, and enterprise product companies in different sectors. In my previous experience I had the chance to work on an architecture re-design project to split a monolith into a microservices application. During this time I gained experience with different technologies such as Kafka, Elasticsearch, Redis, Kubernetes, VictoriaMetrics, Spring Framework, and a bit of Cassandra.

    Why Am I here?

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

    As you are probably well aware, Gunnar Morling has stepped down from his position as Debezium project lead and is now pursuing new exciting adventures. It is sad, but every cloud has a silver lining!

    What can it be? We (the Debezium team and Red Hat) are hiring! Are you a community contributor? Do you have any pull requests under your belt? Are you a happy Debezium user and eager to do more, or are you a seasoned Java developer looking for work in an exciting and inclusive open-source environment?

    Some time in early 2017, I got a meeting invite from Debezium’s founder, Randall Hauch. He was about to begin a new chapter in his professional career and was looking for someone to take over as the project lead for Debezium. So we hopped on a call to talk things through, and I was immediately sold on the concept of change data capture, its large number of potential use cases and applications, and the idea of making this available to the community as open-source. After some short consideration I decided to take up this opportunity, and without a doubt this has been one of the best decisions I’ve ever made in my job.

When developing the tests for your project, sooner or later you will probably get into the situation where some of the tests fail randomly. These tests, also known as flaky tests, are very unpleasant as you never know whether the failure was random or there is a regression in your code. In the worst case you just ignore these tests because you know they are flaky. Most testing frameworks even have a dedicated annotation or other means to express that a test is flaky and that its failures should be ignored. The value of such a test is very questionable. The best thing you can do with such a test is of course to fix it so that it doesn’t fail randomly. That’s easy to say, but harder to do. The hardest part is usually to make the test fail in your development environment so that you can debug it and understand why it fails and what the root cause of the failure is. In this blog post I’ll try to show a few techniques which may help you to simulate random test failures on your local machine.
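The post itself walks through the actual techniques; as a small, hedged illustration of the kind of test it targets (not an example taken from the post), here is a timing-dependent JUnit test that passes on an idle machine but fails whenever the background thread is delayed beyond the hard-coded wait, which simulated load can readily provoke:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.concurrent.atomic.AtomicInteger;
import org.junit.jupiter.api.Test;

// Hedged illustration of a typical flaky test: the assertion only holds if the
// worker thread finishes within the hard-coded 100 ms, which is not guaranteed
// on a loaded machine. Generating CPU load locally is one way to reproduce it.
class FlakyCounterTest {

    @Test
    void counterIsIncrementedByBackgroundThread() throws Exception {
        AtomicInteger counter = new AtomicInteger();

        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(50); // simulated work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            counter.incrementAndGet();
        });
        worker.start();

        Thread.sleep(100);              // "should be enough" -- the root of the flakiness
        assertEquals(1, counter.get()); // fails whenever the worker is delayed past 100 ms
    }
}
```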

As you probably noticed, we have started work on Debezium 2.0. One of the planned changes for the 2.0 release is to switch to Java 11 as a baseline. While some Java build providers still support Java 8, other Java 8 distributions have already reached their end of life/support. Users are moving to Java 11 anyway, as surveys like New Relic’s State of the Java Ecosystem Report indicate. But it is not only a matter of support: Java 11 comes with various performance improvements, useful tools like JDK Flight Recorder, which was open-sourced in Java 11, and more. So we felt it was about time to start thinking about using a more recent JDK as the baseline for Debezium, and the new major release is a natural milestone at which to make the switch.

    Hi everyone, my name is Vojtěch Juránek and I recently joined the Debezium team.

Most of my professional IT career I’ve spent at Red Hat. I have a background in particle physics, but I did quite a lot of programming even before joining Red Hat, when working on simulations of high-energy particle collisions and their data analysis. The science is open by default and all the software I was using was open source as well. That is where my love for open source started.

    At ScyllaDB, we develop a high-performance NoSQL database Scylla, API-compatible with Apache Cassandra, Amazon DynamoDB and Redis. Earlier this year, we introduced support for Change Data Capture in Scylla 4.3. This new feature seemed like a perfect match for integration with the Apache Kafka ecosystem, so we developed the Scylla CDC Source Connector using the Debezium framework. In this blogpost we will cover the basic structure of Scylla’s CDC, reasons we chose the Debezium framework and design decisions we made.

    Welcome to the latest edition of "Debezium Community Stories With…​", a series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. Today it’s my pleasure to talk to Sergei Morozov.

    Welcome to the newest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    It’s been a long time since our last edition. But we are back again! In case you missed our last edition, you can check it out here.

    Hello everyone, my name is Anisha Mohanty and I recently joined Red Hat and the Debezium team.

    I started my journey with Red Hat in April 2020 after completing my graduation. I was introduced to open source in my early college days, but I wasn’t aware of how organizations work and wanted to get the essence of open source ethics and values. That is something that I am fascinated to learn as I joined Red Hat.

    My work started under the Data Virtualization team with Teiid and then under the GRAPHQLCRUD project which is a standard for a generic query interface on top of GraphQL. The project has started well and is in great shape right now. We have successfully added CRUD capabilities, paging, and filtering specifications.

    Coming to Debezium, I first heard about it as some DV members started contributing here, well back then it was a completely new thing for me. I started exploring more, and it was not long when I had my first interaction with Gunnar and Jiri. With a warm welcome and great team here, I am really excited to work with the Debezium Community.

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

    When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing to safely operate CDC pipelines also in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 for capturing clickstreams in the offshore datacenters into Kafka and aggregating them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

    In 2014 I did my first OSS contributions to Composer, PHP’s dependency management and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I did my first contributions to Debezium with work on the MySQL snapshot process and fixing a MySQL TIME data type issue.

    In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurances, industrial sector, media). I was doing lots of networking at that time, where I learned how awesome the community around Kafka is. I was always quite sad I didn’t have more time to focus on OSS projects.

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    Welcome to the Debezium community newsletter in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.

    Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    Hello everyone, my name is Chris Cranford and I recently joined the Debezium team.

My journey at Red Hat began just over three years ago; however, I have been in this line of work for nearly twenty years. All throughout my career, I have advocated and supported open source software. Many of my initial software endeavors were based on open source software, several of which are still heavily used today, such as Hibernate ORM.

    When I first learned about the Debezium project last year, I was very excited about it right away.

    I could see how this project would be very useful for many people out there and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, the overall idea of creating a diverse eco-system of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming web site and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve been working on previously and also tie in with ideas I’ve been pursuing around CQRS, event sourcing and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM, a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play in using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture could be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each was initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and written blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and work more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who has recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and he will be a superb lead for the Debezium community.

    \ No newline at end of file diff --git a/tag/connectors/index.html b/tag/connectors/index.html index ea28f48b2e..5f169ea3ec 100644 --- a/tag/connectors/index.html +++ b/tag/connectors/index.html @@ -1 +1 @@ - Tag: connectors

    Debezium Blog

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. Because of this, we can re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    \ No newline at end of file + Tag: connectors

    Debezium Blog

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. Because of this, we can re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).
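To make the connector-plus-SMT idea more tangible, here is a minimal, hypothetical sketch of running the Debezium PostgreSQL connector through the embedded engine with a TimescaleDB transform applied; the SMT class name, schema filter, and connection settings are assumptions for illustration and not taken from the article.

```java
import java.util.Properties;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class TimescaleDbCdcSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "timescaledb-sketch");
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        props.setProperty("topic.prefix", "timescaledb");
        // Placeholder connection details.
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "tsdb");
        // TimescaleDB keeps hypertable rows in internal chunk tables, so capture that schema.
        props.setProperty("schema.include.list", "_timescaledb_internal");
        // Apply the TimescaleDB SMT to map chunk-level events back to logical hypertables;
        // the class name below is assumed for illustration.
        props.setProperty("transforms", "timescaledb");
        props.setProperty("transforms.timescaledb.type",
                "io.debezium.connector.postgresql.transforms.timescaledb.TimescaleDb");

        try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(event -> System.out.println(event.value()))
                .build()) {
            // run() blocks until the engine is stopped; a real application would
            // usually submit it to an ExecutorService.
            engine.run();
        }
    }
}
```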

    \ No newline at end of file diff --git a/tag/containers/index.html b/tag/containers/index.html index 5a12deb94b..9aa3a44071 100644 --- a/tag/containers/index.html +++ b/tag/containers/index.html @@ -1 +1 @@ - Tag: containers

    Debezium Blog

As you may have noticed, the Docker company recently announced a reduction of its free organization accounts offering. Docker intends to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects. The Debezium project doesn’t meet their definition of an open source project, as we have a pathway to commercialization. As the accounts were to be terminated within 30 days, we immediately started working on moving the Debezium project off Docker Hub.

    \ No newline at end of file + Tag: containers

    Debezium Blog

As you may have noticed, the Docker company recently announced a reduction of its free organization accounts offering. Docker intends to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects. The Debezium project doesn’t meet their definition of an open source project, as we have a pathway to commercialization. As the accounts were to be terminated within 30 days, we immediately started working on moving the Debezium project off Docker Hub.

    \ No newline at end of file diff --git a/tag/cqrs/index.html b/tag/cqrs/index.html index 2321f7d578..3fd5c9305b 100644 --- a/tag/cqrs/index.html +++ b/tag/cqrs/index.html @@ -1 +1 @@ - Tag: cqrs

    Debezium Blog

In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database, resulting in the creation of DDD aggregates via Debezium and Kafka Streams.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, with potentially disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    \ No newline at end of file + Tag: cqrs

    Debezium Blog

In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database, resulting in the creation of DDD aggregates via Debezium and Kafka Streams.
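As a rough sketch of the query-side materialization (not the article’s actual code), the snippet below joins two Debezium change-event topics that are assumed to be keyed by the same identifier and writes a denormalized aggregate to an output topic, from which the MongoDB query database could be fed by a sink connector; topic names, keys, and the string-based serialization are placeholders.

```java
import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KTable;

public class OrderAggregateTopology {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "cdc-cqrs-aggregates");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder builder = new StreamsBuilder();

        // Change events from the command-side MySQL database, captured by Debezium.
        // Topic names and keying are assumptions for illustration.
        KTable<String, String> orders = builder.table("dbserver1.inventory.orders");
        KTable<String, String> shipments = builder.table("dbserver1.inventory.shipments");

        // Join the latest state of both tables into one denormalized "DDD aggregate" document.
        KTable<String, String> orderAggregates = orders.join(
                shipments,
                (order, shipment) -> "{\"order\":" + order + ",\"shipment\":" + shipment + "}");

        // The query side (MongoDB) would be populated from this topic, e.g. via a sink connector.
        orderAggregates.toStream().to("order-aggregates");

        new KafkaStreams(builder.build(), props).start();
    }
}
```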

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, with potentially disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    \ No newline at end of file diff --git a/tag/custom/index.html b/tag/custom/index.html index b4561beb85..c8e4c1c4c2 100644 --- a/tag/custom/index.html +++ b/tag/custom/index.html @@ -1 +1 @@ - Tag: custom

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    \ No newline at end of file + Tag: custom

    Debezium Blog

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.
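To get a feel for what such a customization might look like, below is a minimal, hypothetical notification channel that simply writes notifications to the log; the interface and method names are assumptions based on the feature description here, and the actual SPI in the Debezium documentation is authoritative.

```java
import io.debezium.config.CommonConnectorConfig;
import io.debezium.pipeline.notification.Notification;
import io.debezium.pipeline.notification.channels.NotificationChannel;

// Hypothetical custom channel; interface and method names are assumptions for illustration.
public class LogNotificationChannel implements NotificationChannel {

    // The channel name referenced from the connector configuration,
    // e.g. notification.enabled.channels=log
    @Override
    public String name() {
        return "log";
    }

    @Override
    public void init(CommonConnectorConfig config) {
        // No initialization needed for this sketch.
    }

    @Override
    public void send(Notification notification) {
        // Forward each notification (e.g. snapshot progress) to standard output.
        System.out.println("Debezium notification: " + notification);
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}
```

Such a class would typically be registered through the Java ServiceLoader mechanism (a META-INF/services entry naming the implementation) and enabled via the connector configuration.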

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    \ No newline at end of file diff --git a/tag/datalake/index.html b/tag/datalake/index.html index b809f015e9..5abab091f5 100644 --- a/tag/datalake/index.html +++ b/tag/datalake/index.html @@ -1 +1 @@ - Tag: datalake

    Debezium Blog

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it does not require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    \ No newline at end of file + Tag: datalake

    Debezium Blog

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it does not require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    \ No newline at end of file diff --git a/tag/db2/index.html b/tag/db2/index.html index 07dcc308f7..d8acc48eca 100644 --- a/tag/db2/index.html +++ b/tag/db2/index.html @@ -1 +1 @@ - Tag: db2

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release addresses several critical stability issues in the 2.6.1.Final release, adds support for Oracle database query filtering with more than one thousand tables, fixes a race condition with PostgreSQL offset flushing, fixes Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, as well as a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release addresses several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, a case where MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we continuously look at improvements and fixes to ensure that older releases keep providing the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release brings a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes: MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release cycle took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have noticed recently that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options and as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and a variety of bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 brings quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

One of the major improvements in Debezium starting with version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, take a deep dive into the implementation details, and also show a demo of it.
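For readers who want a quick taste before the deep dive, this is a hedged sketch of how an incremental snapshot is typically triggered: inserting a signal row into the connector’s signaling table. The table name, connection details, and target table below are placeholders for illustration.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class TriggerIncrementalSnapshot {
    public static void main(String[] args) throws Exception {
        // Placeholder connection to the database the connector is capturing.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
             PreparedStatement stmt = conn.prepareStatement(
                     "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            stmt.setString(1, "ad-hoc-1");                         // arbitrary unique signal id
            stmt.setString(2, "execute-snapshot");                 // signal type for incremental snapshots
            stmt.setString(3, "{\"data-collections\": [\"inventory.orders\"]}"); // tables to snapshot
            stmt.executeUpdate();
        }
    }
}
```

The connector observes the signaling table through the same change stream, picks up the row, and then interleaves snapshot chunks for the listed tables with the ongoing stream of change events.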

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements not only to the Debezium Oracle connector but to the other connectors as well.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

• Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

• Field renaming using the ExtractNewRecordState SMT’s add.fields and add.headers configurations (see the configuration sketch after this list)
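To illustrate the second item, here is a hedged configuration sketch (Kafka Connect-style properties expressed in Java) applying the ExtractNewRecordState SMT with add.fields and add.headers; the chosen fields and the <source field>:<new name> renaming notation are examples for illustration rather than text from the release announcement.

```java
import java.util.Properties;

public class UnwrapSmtConfigSketch {
    // Connector configuration fragment for the ExtractNewRecordState SMT.
    public static Properties unwrapConfig() {
        Properties props = new Properties();
        props.setProperty("transforms", "unwrap");
        props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
        // Copy change-event metadata into the flattened record, renaming the fields on the way.
        props.setProperty("transforms.unwrap.add.fields", "op:operation,source.ts_ms:event_timestamp");
        // Expose the source table name as a message header.
        props.setProperty("transforms.unwrap.add.headers", "source.table:table");
        return props;
    }
}
```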

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows to propagate data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    Core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    We have developed a Debezium connector for usage with Db2 which is now available as part of the Debezium incubator. Here we describe the use case we have for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecology, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database as well as a wide range of bug fixes. As the 1.1 release still is under active development, we’ve backported an asorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few others.

\ No newline at end of file
+ Tag: db2

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that I announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and a fix for a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release addresses several critical stability issues in the 2.6.1.Final release, adds support for Oracle database query filtering with more than one thousand tables, fixes a race condition with PostgreSQL offset flushing, fixes Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, along with a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, an issue where MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand-new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we continuously look at improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly, support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes that address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, covering a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth, covering breaking changes, new features, and other changes. Breaking changes: MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation (a minimal configuration sketch follows below), MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server.

Let’s take a moment and dive into what’s new!
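
To give a rough sense of what wiring up that new JDBC sink connector looks like, here is a minimal configuration sketch. It is only an illustration: the topic names, connection URL, and credentials are placeholders, and a real deployment would register this configuration with Kafka Connect rather than print it.

```java
import java.util.Properties;

public class JdbcSinkConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.jdbc.JdbcSinkConnector");
        // Topics produced by a Debezium source connector (placeholder names).
        props.setProperty("topics", "dbserver1.inventory.customers,dbserver1.inventory.orders");
        // Target database (placeholder URL and credentials).
        props.setProperty("connection.url", "jdbc:postgresql://localhost:5432/warehouse");
        props.setProperty("connection.username", "sink_user");
        props.setProperty("connection.password", "sink_password");
        // Upsert on the record key and propagate deletes from the source.
        props.setProperty("insert.mode", "upsert");
        props.setProperty("primary.key.mode", "record_key");
        props.setProperty("delete.enabled", "true");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```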

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

• Single Message Transformation (SMT) predicate support in the Debezium engine (see the sketch after this list)

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module
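
The SMT predicate support mentioned in the list above uses the standard Kafka Connect predicate properties, so the same configuration can be handed to the Debezium engine. A minimal sketch, with the topic pattern and the choice of SMT purely illustrative:

```java
import java.util.Properties;

public class EnginePredicateSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Declare a named predicate that matches only a single topic.
        props.setProperty("predicates", "onlyCustomers");
        props.setProperty("predicates.onlyCustomers.type",
                "org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
        props.setProperty("predicates.onlyCustomers.pattern", ".*\\.inventory\\.customers");
        // Apply the ExtractNewRecordState SMT only where the predicate matches.
        props.setProperty("transforms", "unwrap");
        props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
        props.setProperty("transforms.unwrap.predicate", "onlyCustomers");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```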

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no fewer than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
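
As a small taste of how an incremental snapshot is triggered in practice, the sketch below inserts an execute-snapshot signal row into the signaling table that the connector watches. The JDBC URL, credentials, and the debezium_signal table name are placeholders; the table must match the connector’s signal.data.collection setting.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class TriggerIncrementalSnapshot {
    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL and credentials for the source database.
        try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
             PreparedStatement stmt = conn.prepareStatement(
                    "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            stmt.setString(1, "ad-hoc-1");
            stmt.setString(2, "execute-snapshot");
            // Ask the connector to incrementally snapshot the listed tables.
            stmt.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
            stmt.executeUpdate();
        }
    }
}
```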

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

• MySQL support for emitting snapshot records as create or read operations

• Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
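
To illustrate the field renaming mentioned in the bullet above, a connector configuration might set up the ExtractNewRecordState SMT roughly as follows; the alias names and field choices are illustrative, assuming the field:alias syntax of the add.fields and add.headers options.

```java
import java.util.Properties;

public class UnwrapSmtRenamingSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Register the ExtractNewRecordState SMT under the alias "unwrap".
        props.setProperty("transforms", "unwrap");
        props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
        // Copy metadata fields into the flattened record, renaming them via field:alias.
        props.setProperty("transforms.unwrap.add.fields", "op:operation,source.ts_ms:event_timestamp");
        // Propagate the table name as a header named "origin_table".
        props.setProperty("transforms.unwrap.add.headers", "table:origin_table");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```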

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community:

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

• A new SQL Server connector snapshot mode, initial_only (see the configuration sketch after this list)

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format
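
As a rough illustration of two of the bullets above, the revised filter option names and the initial_only snapshot mode, a SQL Server connector configuration might look something like the following sketch; hostnames, credentials, database, and table names are placeholders.

```java
import java.util.Properties;

public class SqlServerInitialOnlySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
        props.setProperty("database.hostname", "sqlserver.example.com"); // placeholder
        props.setProperty("database.port", "1433");
        props.setProperty("database.user", "debezium");                  // placeholder
        props.setProperty("database.password", "dbz-secret");           // placeholder
        props.setProperty("database.dbname", "inventory");              // placeholder
        props.setProperty("database.server.name", "server1");           // logical server name
        // Revised filter option names (replacing the earlier whitelist/blacklist options).
        props.setProperty("table.include.list", "dbo.customers,dbo.orders");
        // Take an initial snapshot of the selected tables and then stop, without streaming changes.
        props.setProperty("snapshot.mode", "initial_only");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```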

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime that allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", which allows anonymizing column values while keeping them correlatable (see the sketch after this list)

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector
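
To make the consistent-hashing masking mode flagged in the list above a bit more concrete, a connector configuration might include a property along these lines; the salt, hash algorithm, and column names are placeholders.

```java
import java.util.Properties;

public class ColumnMaskHashingSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Replace the listed column values with a salted SHA-256 hash, so equal inputs
        // always produce the same masked output and rows remain correlatable.
        props.setProperty("column.mask.hash.SHA-256.with.salt.CzQMA0cB5K",
                "inventory.customers.email,inventory.customers.phone");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```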

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

• PostgreSQL can restrict the set of tables in a publication while using pgoutput (DBZ-1813), as illustrated after this list.

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).
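
The pgoutput publication restriction from the first bullet (DBZ-1813) comes down to a couple of connector properties. A rough sketch, using placeholder table names and the newer include-list option names:

```java
import java.util.Properties;

public class FilteredPublicationSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("plugin.name", "pgoutput");
        // Let Debezium create a publication covering only the filtered tables
        // instead of FOR ALL TABLES.
        props.setProperty("publication.name", "dbz_publication");
        props.setProperty("publication.autocreate.mode", "filtered");
        props.setProperty("table.include.list", "inventory.customers,inventory.orders");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```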

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

About three months after the 1.0 release, this new version comes with many exciting new features.

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

We have developed a Debezium connector for use with Db2, which is now available as part of the Debezium incubator. Here we describe the use case we have for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecosystem, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synced to Maven Central yet; this should be the case within the next few hours.

\ No newline at end of file
diff --git a/tag/ddd/index.html b/tag/ddd/index.html
index c869692d67..6ade0a099e 100644
--- a/tag/ddd/index.html
+++ b/tag/ddd/index.html
@@ -1 +1 @@
- Tag: ddd

\ No newline at end of file
+ Tag: ddd

    Debezium Blog

In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database, resulting in the creation of DDD Aggregates via Debezium & Kafka Streams.
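
The pipeline described above can be approximated with a small Kafka Streams topology. This is only a sketch under several assumptions: hypothetical topic names (dbserver1.inventory.orders and dbserver1.inventory.order_lines), change events already unwrapped to plain JSON strings (for example via the ExtractNewRecordState SMT) and keyed by order id, and a MongoDB sink connector (not shown) consuming the output topic.

```java
import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KTable;

public class OrderAggregatePipeline {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "order-aggregates");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder builder = new StreamsBuilder();

        // Latest state of each order, keyed by order id.
        KTable<String, String> orders = builder.table("dbserver1.inventory.orders");
        // Naive concatenation of the lines belonging to each order, keyed by order id.
        KTable<String, String> orderLines = builder.<String, String>stream("dbserver1.inventory.order_lines")
                .groupByKey()
                .reduce((oldLines, newLine) -> oldLines + "," + newLine);

        // Join both tables into a denormalized "order aggregate" and emit it to a topic
        // that a MongoDB sink connector could consume.
        orders.join(orderLines, (order, lines) -> "{\"order\":" + order + ",\"lines\":[" + lines + "]}")
              .toStream()
              .to("order-aggregates");

        new KafkaStreams(builder.build(), props).start();
    }
}
```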

\ No newline at end of file
diff --git a/tag/debezium-server/index.html b/tag/debezium-server/index.html
index bb96252343..aa743990f4 100644
--- a/tag/debezium-server/index.html
+++ b/tag/debezium-server/index.html
@@ -1 +1 @@
- Tag: debezium-server

    Debezium Blog

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

• The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode. This mode should preferably be used, and for Debezium 1.3 we’re planning for it to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime for propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", which anonymizes column values while still keeping them correlatable (also shown in the sketch after this list)

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector
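
As a rough illustration of the new filtering SMT and the "consistent hashing" column mask mentioned in the list above, a connector configuration could contain properties along these lines; the table and column names as well as the salt are placeholders, and the Filter SMT additionally requires a JSR 223 scripting engine such as Groovy on the Kafka Connect classpath:

# content-based filtering: only forward change events that satisfy the condition
transforms = filter
transforms.filter.type = io.debezium.transforms.Filter
transforms.filter.language = jsr223.groovy
transforms.filter.condition = value.op != 'd'
# consistent hashing column mask: values are anonymized but remain correlatable
column.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.customers.email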

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

• The PostgreSQL connector can restrict the publication to the set of captured tables when using pgoutput (DBZ-1813); a configuration sketch follows this list.

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).
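
As a minimal sketch of the publication-related settings mentioned above, the Postgres connector can be told to create a publication covering only the captured tables; the publication name is a placeholder:

plugin.name = pgoutput
publication.name = dbz_publication
# create the publication only for the captured tables instead of FOR ALL TABLES
publication.autocreate.mode = filtered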

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, which opens up its open-source change data capture capabilities to messaging infrastructure like Amazon Kinesis.
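
To give an idea of what running Debezium Server looks like, here is a minimal sketch of an application.properties file, assuming a Postgres source and a Kinesis sink; host names, credentials, database name and region are placeholders:

debezium.sink.type = kinesis
debezium.sink.kinesis.region = eu-central-1
debezium.source.connector.class = io.debezium.connector.postgresql.PostgresConnector
debezium.source.database.hostname = postgres.example.com
debezium.source.database.port = 5432
debezium.source.database.user = debezium
debezium.source.database.password = dbz
debezium.source.database.dbname = inventory
debezium.source.database.server.name = tutorial
debezium.source.offset.storage.file.filename = data/offsets.dat
debezium.source.offset.flush.interval.ms = 0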

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    \ No newline at end of file + Tag: debezium-server

    Debezium Blog

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

• The Debezium Postgres connector could miss events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshot mode. This mode should preferably be used, and for Debezium 1.3 we’re planning to make it the basis for all existing snapshot modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime for propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", which anonymizes column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, which opens up its open-source change data capture capabilities to messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    \ No newline at end of file diff --git a/tag/debezium-ui/index.html b/tag/debezium-ui/index.html index 21e6414568..3d00adc829 100644 --- a/tag/debezium-ui/index.html +++ b/tag/debezium-ui/index.html @@ -1 +1 @@ - Tag: debezium-ui

    Debezium Blog

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing users to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve: users need to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing users to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    \ No newline at end of file + Tag: debezium-ui

    Debezium Blog

    The Debezium UI team continues to add support for more features, allowing users to configure connectors more easily. In this article, we’ll describe and demonstrate how to provide the additional properties for configuration that the UI does not expose by default. Read further for more information!

The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing users to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve: users need to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    Over the last five years, Debezium has become a leading open-source solution for change data capture for a variety of databases. Users from all kinds of industries work with Debezium for use cases like replication of data from operational databases into data warehouses, updating caches and search indexes, driving streaming queries via Kafka Streams or Apache Flink, synchronizing data between microservices, and many more.

When talking to Debezium users, we generally receive very good feedback on the range of applications enabled by Debezium and its flexibility: e.g. each connector can be configured and fine-tuned in many ways, depending on your specific requirements. A large number of metrics provide deep insight into the state of running Debezium connectors, allowing users to safely operate CDC pipelines even in huge installations with thousands of connectors.

    All this comes at the cost of a learning curve, though: users new to Debezium need to understand the different options and settings as well as learn about best practices for running Debezium in production. We’re therefore constantly exploring how the user experience of Debezium can be further improved, allowing people to set up and operate its connectors more easily.

    \ No newline at end of file diff --git a/tag/debezium/index.html b/tag/debezium/index.html index f4f95f2b44..58d3dc8821 100644 --- a/tag/debezium/index.html +++ b/tag/debezium/index.html @@ -1 +1 @@ - Tag: debezium

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database, resulting in the creation of DDD Aggregates via Debezium and Kafka Streams.

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. This installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step, explaining why it is essential should you use a container image deployment.

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. Moreover, neither Apache Kafka nor Apache Spark applications are required to build the data feed, which reduces the complexity of the overall solution.

    \ No newline at end of file + Tag: debezium

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

In this post, we are going to talk about a CDC-CQRS pipeline between a normalized relational database, MySQL, as the command database and a de-normalized NoSQL database, MongoDB, as the query database, resulting in the creation of DDD Aggregates via Debezium and Kafka Streams.

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. This installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step, explaining why it is essential should you use a container image deployment.

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. Moreover, neither Apache Kafka nor Apache Spark applications are required to build the data feed, which reduces the complexity of the overall solution.

    \ No newline at end of file diff --git a/tag/deduplication/index.html b/tag/deduplication/index.html index 13b7da6777..2b0b4f91e0 100644 --- a/tag/deduplication/index.html +++ b/tag/deduplication/index.html @@ -1 +1 @@ - Tag: deduplication

    Debezium Blog

Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far, Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event; however, in case of failures, restarts, or DB connection drops, the same event can be delivered more than once. The typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered, and at the same time there won’t be any duplicates; every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out of the box, with only a small configuration change.
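
As a rough sketch of that configuration change, assuming a Kafka Connect version with KIP-618 support (3.3 or newer), the relevant settings look roughly like this:

# Kafka Connect worker configuration (distributed mode)
exactly.once.source.support = enabled
# per-connector configuration: fail the connector if exactly-once delivery cannot be guaranteed
exactly.once.support = required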

    \ No newline at end of file + Tag: deduplication

    Debezium Blog

Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far, Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event; however, in case of failures, restarts, or DB connection drops, the same event can be delivered more than once. The typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered, and at the same time there won’t be any duplicates; every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out of the box, with only a small configuration change.

    \ No newline at end of file diff --git a/tag/discussion/index.html b/tag/discussion/index.html index 2811d8b285..1f42f65851 100644 --- a/tag/discussion/index.html +++ b/tag/discussion/index.html @@ -7,4 +7,4 @@ num.partitions = 1 compression.type = producer log.cleanup.policy = delete -log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create the topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, since KIP-158 enables customizable topic creation with Kafka Connect.
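
For illustration, a Debezium connector registration could carry topic creation rules along these lines; the counts and retention are placeholders to adjust to your environment, and topic.creation.enable must not be disabled on the Connect worker:

topic.creation.default.replication.factor = 3
topic.creation.default.partitions = 10
topic.creation.default.cleanup.policy = delete
topic.creation.default.retention.ms = 604800000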

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

There are two main components involved whose configuration needs consideration:

• The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc. (a configuration sketch follows this list)

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.
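
For the MySQL case mentioned in the first bullet, the server-side prerequisites boil down to a few my.cnf settings; this is only a sketch, and the server id and log retention are placeholders:

# enable row-based binlogging so Debezium can read complete change events
server-id = 223344
log_bin = mysql-bin
binlog_format = ROW
binlog_row_image = FULL
expire_logs_days = 10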

We have developed a Debezium connector for use with Db2, which is now available as part of the Debezium incubator. Here we describe our use case for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecosystem, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

    So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted therefore resulting in disastrous consequences for mission critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to obtain "read your own writes" semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
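
For orientation, registering the outbox SMT on a connector looks roughly like the following sketch; the topic naming shown here follows the SMT defaults, and exact option names may vary between Debezium versions:

transforms = outbox
transforms.outbox.type = io.debezium.transforms.outbox.EventRouter
# derive the destination topic from the aggregate type column, e.g. outbox.event.order
transforms.outbox.route.topic.replacement = outbox.event.${routedByValue}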

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

    This is a guest post by Apache Pulsar PMC Member and Committer Jia Zhai.

Debezium is an open source project for change data capture (CDC). It is built on Apache Kafka Connect and supports multiple databases, such as MySQL, MongoDB, PostgreSQL, Oracle, and SQL Server. Apache Pulsar includes a set of built-in connectors based on the Pulsar IO framework, which is the counterpart to Apache Kafka Connect.

    As of version 2.3.0, Pulsar IO comes with support for the Debezium source connectors out of the box, so you can leverage Debezium to stream changes from your databases into Apache Pulsar. This tutorial walks you through setting up the Debezium connector for MySQL with Pulsar IO.

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget to call that invalidation functionality, or the application will keep working with outdated cached data.

In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This allows invalidating affected cache entries in near real-time, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.

    Updating external full text search indexes (e.g. Elasticsearch) after data changes is a very popular use case for change data capture (CDC).

As we’ve discussed in a blog post a while ago, the combination of Debezium’s CDC source connectors and Confluent’s sink connector for Elasticsearch makes it straightforward to capture data changes in MySQL, Postgres, etc. and push them towards Elasticsearch in near real-time. This results in a 1:1 relationship between tables in the source database and a corresponding search index in Elasticsearch, which is perfectly fine for many use cases.

    It gets more challenging though if you’d like to put entire aggregates into a single index. An example could be a customer and all their addresses; those would typically be stored in two separate tables in an RDBMS, linked by a foreign key, whereas you’d like to have just one index in Elasticsearch, containing documents of customers with their addresses embedded, allowing you to efficiently search for customers based on their address.

Following up on the KStreams-based solution we described recently, in this post we’d like to present an alternative for materializing such aggregate views, driven by the application layer.

Most of the time, Debezium is used to stream data changes into Apache Kafka. But what if you’re using another streaming platform such as Apache Pulsar, or a cloud-based solution such as Amazon Kinesis, Azure Event Hubs, and the like? Can you still benefit from Debezium’s powerful change data capture (CDC) capabilities and ingest changes from databases such as MySQL, Postgres, SQL Server, etc.?

    Turns out, with just a bit of glue code, you can! In the following we’ll discuss how to use Debezium to capture changes in a MySQL database and stream the change events into Kinesis, a fully-managed data streaming service available on the Amazon cloud.

    Yesterday I had the opportunity to present Debezium and the idea of change data capture (CDC) to the Darmstadt Java User Group. It was a great evening with lots of interesting discussions and questions. One of the questions being the following: what is the advantage of using a log-based change data capturing tool such as Debezium over simply polling for updated records?

    So first of all, what’s the difference between the two approaches? With polling-based (or query-based) CDC you repeatedly run queries (e.g. via JDBC) for retrieving any newly inserted or updated rows from the tables to be captured. Log-based CDC in contrast works by reacting to any changes to the database’s log files (e.g. MySQL’s binlog or MongoDB’s op log).

    As this wasn’t the first time this question came up, I thought I could provide a more extensive answer also here on the blog. That way I’ll be able to refer to this post in the future, should the question come up again :)

    So without further ado, here’s my list of five advantages of log-based CDC over polling-based approaches.

Microservice-based architectures can be considered an industry trend and are thus often found in enterprise applications lately. One possible way to keep data synchronized across multiple services and their backing data stores is to make use of an approach called change data capture, or CDC for short.

Essentially, CDC allows listening to any modifications occurring at one end of a data flow (i.e. the data source) and communicating them as change events to other interested parties, or storing them in a data sink. Instead of doing this in a point-to-point fashion, it’s advisable to decouple this flow of events between data sources and data sinks. Such a scenario can be implemented based on Debezium and Apache Kafka with relative ease and effectively no coding.
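
To give a taste of the "effectively no coding" claim, capturing a MySQL database typically only requires registering a connector configuration like the following sketch; hosts, credentials and topic names are placeholders, and the property names follow the 1.x naming conventions:

name = inventory-connector
connector.class = io.debezium.connector.mysql.MySqlConnector
database.hostname = mysql.example.com
database.port = 3306
database.user = debezium
database.password = dbz
database.server.id = 184054
database.server.name = dbserver1
database.whitelist = inventory
database.history.kafka.bootstrap.servers = kafka:9092
database.history.kafka.topic = schema-changes.inventory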

    As an example, consider the following microservice-based architecture of an order management system:

    \ No newline at end of file +log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create the topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, since KIP-158 enables customizable topic creation with Kafka Connect.

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.

We have developed a Debezium connector for use with Db2, which is now available as part of the Debezium incubator. Here we describe our use case for Change Data Capture (CDC), the various approaches that already exist in the Db2 ecosystem, and how we came to Debezium. In addition, we motivate the approach we took to implementing the Db2 Debezium connector.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

    So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted therefore resulting in disastrous consequences for mission critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to obtain "read your own writes" semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

    This is a guest post by Apache Pulsar PMC Member and Committer Jia Zhai.

Debezium is an open source project for change data capture (CDC). It is built on Apache Kafka Connect and supports multiple databases, such as MySQL, MongoDB, PostgreSQL, Oracle, and SQL Server. Apache Pulsar includes a set of built-in connectors based on the Pulsar IO framework, which is the counterpart to Apache Kafka Connect.

    As of version 2.3.0, Pulsar IO comes with support for the Debezium source connectors out of the box, so you can leverage Debezium to stream changes from your databases into Apache Pulsar. This tutorial walks you through setting up the Debezium connector for MySQL with Pulsar IO.

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget to call that invalidation functionality, or the application will keep working with outdated cached data.

In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This allows invalidating affected cache entries in near real-time, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.

    Updating external full text search indexes (e.g. Elasticsearch) after data changes is a very popular use case for change data capture (CDC).

As we’ve discussed in a blog post a while ago, the combination of Debezium’s CDC source connectors and Confluent’s sink connector for Elasticsearch makes it straightforward to capture data changes in MySQL, Postgres, etc. and push them towards Elasticsearch in near real-time. This results in a 1:1 relationship between tables in the source database and a corresponding search index in Elasticsearch, which is perfectly fine for many use cases.

    It gets more challenging though if you’d like to put entire aggregates into a single index. An example could be a customer and all their addresses; those would typically be stored in two separate tables in an RDBMS, linked by a foreign key, whereas you’d like to have just one index in Elasticsearch, containing documents of customers with their addresses embedded, allowing you to efficiently search for customers based on their address.

Following up on the KStreams-based solution we described recently, in this post we’d like to present an alternative for materializing such aggregate views, driven by the application layer.

Most of the time, Debezium is used to stream data changes into Apache Kafka. But what if you’re using another streaming platform such as Apache Pulsar, or a cloud-based solution such as Amazon Kinesis, Azure Event Hubs, and the like? Can you still benefit from Debezium’s powerful change data capture (CDC) capabilities and ingest changes from databases such as MySQL, Postgres, SQL Server, etc.?

    Turns out, with just a bit of glue code, you can! In the following we’ll discuss how to use Debezium to capture changes in a MySQL database and stream the change events into Kinesis, a fully-managed data streaming service available on the Amazon cloud.

    Yesterday I had the opportunity to present Debezium and the idea of change data capture (CDC) to the Darmstadt Java User Group. It was a great evening with lots of interesting discussions and questions. One of the questions being the following: what is the advantage of using a log-based change data capturing tool such as Debezium over simply polling for updated records?

    So first of all, what’s the difference between the two approaches? With polling-based (or query-based) CDC you repeatedly run queries (e.g. via JDBC) for retrieving any newly inserted or updated rows from the tables to be captured. Log-based CDC in contrast works by reacting to any changes to the database’s log files (e.g. MySQL’s binlog or MongoDB’s op log).
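For illustration, this is roughly what the polling-based variant boils down to; the table and column names are made up, and a real implementation would also have to persist the last-seen timestamp across restarts.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Timestamp;

public class PollingBasedCdc {

    public static void main(String[] args) throws Exception {
        Timestamp lastSeen = Timestamp.valueOf("1970-01-01 00:00:00");

        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/inventory", "user", "password")) { // placeholder connection

            while (true) {
                // Repeatedly query for rows changed since the last poll; this relies on
                // a "last_updated" column being maintained by the writing application
                try (PreparedStatement stmt = conn.prepareStatement(
                        "SELECT id, last_updated FROM customers WHERE last_updated > ? ORDER BY last_updated")) {
                    stmt.setTimestamp(1, lastSeen);
                    try (ResultSet rs = stmt.executeQuery()) {
                        while (rs.next()) {
                            lastSeen = rs.getTimestamp("last_updated");
                            System.out.println("changed row, id=" + rs.getLong("id"));
                        }
                    }
                }
                Thread.sleep(5_000); // polling interval: a trade-off between change latency and database load
            }
        }
    }
}

The sketch already hints at some of the points discussed below: it needs an update timestamp column, it cannot observe deletes, and it only ever sees the latest state of a row between two polls.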

As this wasn’t the first time this question came up, I thought I could provide a more extensive answer here on the blog as well. That way I’ll be able to refer to this post in the future, should the question come up again :)

    So without further ado, here’s my list of five advantages of log-based CDC over polling-based approaches.

Microservice-based architectures can be considered an industry trend and are thus often found in enterprise applications these days. One possible way to keep data synchronized across multiple services and their backing data stores is to make use of an approach called change data capture, or CDC for short.

Essentially, CDC allows you to listen to any modifications occurring at one end of a data flow (i.e. the data source) and communicate them as change events to other interested parties or store them in a data sink. Instead of doing this in a point-to-point fashion, it’s advisable to decouple this flow of events between data sources and data sinks. Such a scenario can be implemented based on Debezium and Apache Kafka with relative ease and effectively no coding.

    As an example, consider the following microservice-based architecture of an order management system:

    \ No newline at end of file diff --git a/tag/docker/index.html b/tag/docker/index.html index ec9d5b1799..53ad51dda1 100644 --- a/tag/docker/index.html +++ b/tag/docker/index.html @@ -1 +1 @@ - Tag: docker

    Debezium Blog

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

As you may have noticed, the Docker company recently announced a reduction of its free organization accounts offering. The Docker company now wants to provide free organization accounts only to Docker-Sponsored Open Source (DSOS) projects. The Debezium project doesn’t meet their definition of an open source project, as we have a pathway to commercialization. As the accounts were to be terminated in 30 days, we immediately started working on moving the Debezium project off Docker Hub.

Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. Not only does this simplify running Kafka from an operational perspective; the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) should also provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol, which is available out of the box since PostgreSQL 10.
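In practice this only means pointing the Postgres connector at the built-in plug-in via its plugin.name option. The following is a minimal, illustrative configuration assembled in Java; hostnames, credentials and the logical server name are placeholders, and the same keys would appear in the JSON payload registered with Kafka Connect’s REST API.

import java.util.HashMap;
import java.util.Map;

public class PgOutputConnectorConfig {

    public static Map<String, String> config() {
        Map<String, String> config = new HashMap<>();
        config.put("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        config.put("plugin.name", "pgoutput");                 // use the built-in pgoutput plug-in (DBZ-766)
        config.put("database.hostname", "my-cloud-postgres");  // placeholder connection details
        config.put("database.port", "5432");
        config.put("database.user", "debezium");
        config.put("database.password", "secret");
        config.put("database.dbname", "inventory");
        config.put("database.server.name", "demo");            // logical name used as the topic prefix
        return config;
    }
}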

    Debezium has received a huge improvement to the structure of its container images recently, making it extremely simple to extend its behaviour.

    This is a small tutorial showing how you can for instance add Sentry, "an open-source error tracking [software] that helps developers monitor and fix crashes in real time". Here we’ll use it to collect and report any exceptions from Kafka Connect and its connectors. Note that this is only applicable for Debezium 0.9+.

We need a few things to get Sentry working; we’ll add all of them and then create a Dockerfile which glues it all together correctly:

• A Log4j configuration

    • SSL certificate for sentry.io, since it’s not by default in the JVM trusted chain

    • The sentry and sentry-log4j libraries

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if no table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one of the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

This release only adds a small number of changes since last week’s CR1 release; most prominently there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) were available on Maven Central, while the Docker images for 0.9.0.Alpha1 were still being uploaded to Docker Hub. The Docker images are now uploaded and ready for use under the tag 0.9.0.Alpha1 and the rolling tag 0.9.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement reducing the risk of an internal race condition.

Robert Coup has found a performance regression in situations when 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, that bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

We have also gathered feedback from the first attempts at running with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
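To give a flavour of what such a custom SMT looks like, here is a minimal, illustrative transformation (not taken from the Kafka or Debezium code base) that routes every record to a single configurable topic, regardless of the table it originates from; the target.topic option name is made up for this example.

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

public class RouteToSingleTopic<R extends ConnectRecord<R>> implements Transformation<R> {

    private static final String TOPIC_CONFIG = "target.topic"; // hypothetical config option

    private String targetTopic;

    @Override
    public void configure(Map<String, ?> configs) {
        targetTopic = (String) configs.get(TOPIC_CONFIG);
    }

    @Override
    public R apply(R record) {
        // keep key, value and schemas, but rewrite the destination topic
        return record.newRecord(targetTopic, record.kafkaPartition(),
                record.keySchema(), record.key(),
                record.valueSchema(), record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return new ConfigDef().define(TOPIC_CONFIG, ConfigDef.Type.STRING,
                ConfigDef.Importance.HIGH, "Topic to which all records are routed");
    }

    @Override
    public void close() {
    }
}

On the connector side such a transformation is then enabled via the transforms property, e.g. transforms=route, transforms.route.type=<fully-qualified class name> and transforms.route.target.topic=<topic>.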

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL connector and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector may stop without completing all updates in a transaction, and when the connector restarts it starts with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and eliminates the possibility of a poorly-timed connector crash causing the connector to only process some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.

    \ No newline at end of file + Tag: docker

    Debezium Blog

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

    As you may have noticed, the Docker company recently announced a reduction of the free organization accounts offering. The Docker company wanted to provide for free organization accounts only for Docker-Sponsored Open Source (DSOS) projects. Debezium project doesn’t meet their definition of open source project as we have a pathway to commercialization. As the accounts ought to be terminated in 30 days, we immediately started to work on moving out the Debezium project from Docker Hub.

    Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. This does not only simplify running Kafka from an operational perspective, the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) also should provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

    The temperatures are slowly cooling off after the biggest summer heat, an the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector to Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared coded between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak but Debezium community is not relenting in its effort so the Debezium 0.10.0.Beta3 is released.

    This version not only continues in incremental improvements of Debezium but also brings new shiny features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers definitely felt the complications when you needed to deploy logical decoding plugin necessary to enable streaming. This is no longer necessary. Debezium now supports (DBZ-766) pgoutput replication protocol that is available out-of-the-box since PostgreSQL 10.

    Debezium has received a huge improvement to the structure of its container images recently, making it extremely simple to extend its behaviour.

    This is a small tutorial showing how you can for instance add Sentry, "an open-source error tracking [software] that helps developers monitor and fix crashes in real time". Here we’ll use it to collect and report any exceptions from Kafka Connect and its connectors. Note that this is only applicable for Debezium 0.9+.

    We need a few things to have Sentry working, and we’ll add all of them and later have a Dockerfile which gets it all glued correctly:

    • Configure Log4j

    • SSL certificate for sentry.io, since it’s not by default in the JVM trusted chain

    • The sentry and sentry-log4j libraries

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumple upon materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcomed usability improvement is that the connectors will log a warning now if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information which of the records is the last one in the snapshot phase so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes also a few new features are provide. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows to rename and filter fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

    The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. Update: the Docker images for 0.9.0.Alpha1 have meanwhile been uploaded to Docker Hub and are ready for use under the tag 0.9.0.Alpha1 and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
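
    For reference, a tiny, hypothetical sketch of that opt-in, shown as Java properties; every other (required) MySQL connector property is omitted here:

        import java.util.Properties;

        // Hypothetical sketch: opting the MySQL connector into the new Antlr-based DDL parser.
        // All other (required) MySQL connector properties are omitted for brevity.
        public class AntlrParserOptInSketch {

            static Properties parserOverride() {
                Properties props = new Properties();
                props.setProperty("ddl.parser.mode", "antlr");   // the legacy parser remains the default in 0.8
                return props;
            }
        }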

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

    This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

    Now let’s take a closer look at some of the new features.

    Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduced the risk of an internal race condition.

    Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

    Suraj Savita (and others) found an issue where our code failed to correctly detect that it is running with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we have introduced a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.
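
    For illustration, a minimal, hypothetical sketch of selecting that decoder, expressed as Java properties; the endpoint is a placeholder and all other Postgres connector properties are omitted:

        import java.util.Properties;

        // Hypothetical sketch: selecting the RDS-specific decoder for the Postgres connector.
        // The endpoint is a placeholder; all other connector properties are omitted.
        public class RdsDecoderConfigSketch {

            static Properties decoderOverride() {
                Properties props = new Properties();
                props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                // Bypass the plug-in detection routine and assume an Amazon RDS wal2json deployment.
                props.setProperty("plugin.name", "wal2json_rds");
                props.setProperty("database.hostname", "my-instance.abc123.us-east-1.rds.amazonaws.com");
                return props;
            }
        }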

    We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

    Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    As the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

    Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
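
    As a hedged illustration of what such a custom SMT might look like (the class name, the target.topic option and the default topic below are made up for this example, not part of any Debezium release), here is a minimal sketch that re-routes every record to a single topic:

        import java.util.Map;

        import org.apache.kafka.common.config.ConfigDef;
        import org.apache.kafka.connect.connector.ConnectRecord;
        import org.apache.kafka.connect.transforms.Transformation;

        // A made-up SMT that re-routes every record to one fixed topic instead of the
        // default table-per-topic mapping. "target.topic" is an illustrative option name.
        public class RerouteToSingleTopic<R extends ConnectRecord<R>> implements Transformation<R> {

            private String targetTopic;

            @Override
            public void configure(Map<String, ?> configs) {
                Object configured = configs.get("target.topic");
                targetTopic = configured != null ? configured.toString() : "all-changes";
            }

            @Override
            public R apply(R record) {
                // Keep key, value and schemas untouched; only the destination topic changes.
                return record.newRecord(targetTopic, record.kafkaPartition(),
                        record.keySchema(), record.key(),
                        record.valueSchema(), record.value(),
                        record.timestamp());
            }

            @Override
            public ConfigDef config() {
                return new ConfigDef().define("target.topic", ConfigDef.Type.STRING, "all-changes",
                        ConfigDef.Importance.MEDIUM, "Topic to which all records are re-routed");
            }

            @Override
            public void close() {
                // nothing to clean up
            }
        }

    Once packaged as a JAR and placed on the Kafka Connect classpath, it would be referenced from the connector configuration via the transforms and transforms.<alias>.type properties, just like any other SMT.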

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

    We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL connector and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use with multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector might stop without completing all updates in a transaction, and when the connector restarted it would start with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting, the connector will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

    I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

    I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and it eliminates the possibility of a poorly-timed connector crash causing the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

    Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.
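
    To give an idea of what embedding looks like in practice, here is a minimal, hypothetical Java sketch. It uses the DebeziumEngine API of current Debezium versions purely for illustration - the library that shipped with 0.1 exposed a different class - and the connection details, file path and sixty-second runtime are placeholders:

        import java.util.Properties;
        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        public class EmbeddedEngineSketch {

            public static void main(String[] args) throws Exception {
                Properties props = new Properties();
                props.setProperty("name", "embedded-mysql-engine");
                props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
                // The embedded engine stores offsets itself instead of relying on Kafka Connect.
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/debezium-offsets.dat");  // placeholder
                props.setProperty("database.hostname", "localhost");   // placeholder connection details
                props.setProperty("database.port", "3306");
                props.setProperty("database.user", "debezium");
                props.setProperty("database.password", "dbz");
                // Further MySQL-specific properties (server id, logical name, schema history, ...) omitted.

                try (DebeziumEngine<ChangeEvent<String, String>> engine =
                         DebeziumEngine.create(Json.class)
                             .using(props)
                             .notifying(event -> System.out.println("Change event: " + event.value()))
                             .build()) {
                    ExecutorService executor = Executors.newSingleThreadExecutor();
                    executor.execute(engine);   // the engine streams changes until it is closed
                    Thread.sleep(60_000);       // demo only: run for one minute
                    executor.shutdown();
                }
            }
        }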

    We wish all the best to the Debezium community for 2018!

    While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server to leverage its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we can optimize access to the data via the SQL query language as well as via full-text search.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

    So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far Debezium has aimed only for at-least-once delivery. This means Debezium guarantees that every single change will be delivered and that there is no missing or skipped change event; however, in case of failures, restarts or DB connection drops, the same event can be delivered more than once. The typical scenario is that an event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered, and at the same time there won’t be any duplicates: every single message will be delivered exactly once. So far our answer was that users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect’s support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out of the box, with only a little configuration change.
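
    Assuming a Kafka Connect version that implements KIP-618 (3.3 or newer), that configuration change could look roughly like the following sketch; the entries normally live in the worker and connector configuration files and are shown as Java properties only for illustration:

        import java.util.Properties;

        // Sketch of the configuration change, assuming Kafka Connect 3.3+ (KIP-618).
        // These entries normally live in the worker and connector configuration files.
        public class ExactlyOnceConfigSketch {

            // Goes into the Kafka Connect *worker* configuration.
            static Properties workerOverride() {
                Properties worker = new Properties();
                worker.setProperty("exactly.once.source.support", "enabled");
                return worker;
            }

            // Goes into the Debezium *connector* configuration.
            static Properties connectorOverride() {
                Properties connector = new Properties();
                connector.setProperty("exactly.once.support", "required");  // fail fast if the cluster cannot guarantee it
                return connector;
            }
        }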

    When a Debezium connector is deployed to a Kafka Connect instance it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

    Let’s recall what a connector registration request looks like for the Debezium MySQL connector:

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

    Last year saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

    As a source of data we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain driven aggregates.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.
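
    As a rough sketch of what wiring up that flattening transform on the sink side might look like (the "unwrap" alias is arbitrary and all other sink connector properties are omitted), shown as Java properties purely for illustration:

        import java.util.Properties;

        // Sketch of enabling the flattening SMT on the sink connector; "unwrap" is an
        // arbitrary alias and all other sink connector properties are omitted.
        public class EventFlatteningConfigSketch {

            static Properties sinkOverride() {
                Properties props = new Properties();
                props.setProperty("transforms", "unwrap");
                // Class name as of Debezium 0.6/0.7; newer releases ship the same transform
                // as io.debezium.transforms.ExtractNewRecordState.
                props.setProperty("transforms.unwrap.type", "io.debezium.transforms.UnwrapFromEnvelope");
                return props;
            }
        }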

    From the very beginning of the project, Debezium has provided a way to run its connectors directly embedded inside an application. How this is provided has changed over time and it still evolves. This article describes another evolution step in this regard - a new implementation of the Debezium engine.

    In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning and to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to the changes in the data in real-time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As the streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

    Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. This installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step, explaining why it is essential should you use a container image deployment.

    Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. This does not only simplify running Kafka from an operational perspective, the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) also should provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

    Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as a map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

    This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics, so to make sure the same key is used on both sides of the join.

    Thanks to KIP-213, this isn’t needed any longer, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically, in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.
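
    To illustrate, here is a minimal, hypothetical Kafka Streams sketch of such a foreign-key join; the topic names, POJO classes and field names are made up for the example, and suitable Serdes are assumed to be configured elsewhere:

        import java.util.function.Function;

        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.KTable;
        import org.apache.kafka.streams.kstream.ValueJoiner;

        // Sketch of a KIP-213 foreign-key join (Kafka Streams 2.4+): each address record is
        // joined with its owning customer without any manual re-keying of the topics.
        public class CustomerAddressJoinSketch {

            public static class Customer { public String name; }
            public static class Address  { public String customerId; public String city; }
            public static class CustomerWithAddress {
                public Customer customer; public Address address;
                CustomerWithAddress(Customer c, Address a) { this.customer = c; this.address = a; }
            }

            public static void buildTopology(StreamsBuilder builder) {
                KTable<String, Customer> customers = builder.table("dbserver1.inventory.customers");
                KTable<String, Address> addresses  = builder.table("dbserver1.inventory.addresses");

                // The extractor pulls the customer id out of the address value; Kafka Streams
                // handles the re-keying that previously had to be done by hand.
                Function<Address, String> foreignKeyExtractor = address -> address.customerId;
                ValueJoiner<Address, Customer, CustomerWithAddress> joiner =
                        (address, customer) -> new CustomerWithAddress(customer, address);

                KTable<String, CustomerWithAddress> joined =
                        addresses.join(customers, foreignKeyExtractor, joiner);

                joined.toStream().to("customers-with-addresses");
            }
        }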

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

    The term outbox describes a pattern that allows independent components or services to perform read your own write semantics while concurrently providing a reliable, eventually consistent view to those writes across component or service boundaries.

    You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

    What about maintenance activities or migration scripts executed directly on the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

    Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in the Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

    This is a guest post by Apache Pulsar PMC Member and Committer Jia Zhai.

    Debezium is an open source project for change data capture (CDC). It is built on Apache Kafka Connect and supports multiple databases, such as MySQL, MongoDB, PostgreSQL, Oracle, and SQL Server. Apache Pulsar includes a set of built-in connectors based on the Pulsar IO framework, which is the counterpart to Apache Kafka Connect.

    As of version 2.3.0, Pulsar IO comes with support for the Debezium source connectors out of the box, so you can leverage Debezium to stream changes from your databases into Apache Pulsar. This tutorial walks you through setting up the Debezium connector for MySQL with Pulsar IO.

    Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from best-of-breed Java libraries and standards, it allows building Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

    Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget to call that invalidation functionality, or the application will keep working with outdated cached data.

    In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This allows invalidating affected cache entries in near real-time, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.

    Updating external full text search indexes (e.g. Elasticsearch) after data changes is a very popular use case for change data capture (CDC).

    As we’ve discussed in a blog post a while ago, the combination of Debezium’s CDC source connectors and Confluent’s sink connector for Elasticsearch makes it straightforward to capture data changes in MySQL, Postgres etc. and push them towards Elasticsearch in near real-time. This results in a 1:1 relationship between tables in the source database and a corresponding search index in Elasticsearch, which is perfectly fine for many use cases.

    It gets more challenging though if you’d like to put entire aggregates into a single index. An example could be a customer and all their addresses; those would typically be stored in two separate tables in an RDBMS, linked by a foreign key, whereas you’d like to have just one index in Elasticsearch, containing documents of customers with their addresses embedded, allowing you to efficiently search for customers based on their address.

    Following up on the KStreams-based solution to this that we described recently, in this post we’d like to present an alternative for materializing such aggregate views, driven by the application layer.

    Most of the times Debezium is used to stream data changes into Apache Kafka. What though if you’re using another streaming platform such as Apache Pulsar or a cloud-based solution such as Amazon Kinesis, Azure Event Hubs and the like? Can you still benefit from Debezium’s powerful change data capture (CDC) capabilities and ingest changes from databases such as MySQL, Postgres, SQL Server etc.?

    Turns out, with just a bit of glue code, you can! In the following we’ll discuss how to use Debezium to capture changes in a MySQL database and stream the change events into Kinesis, a fully-managed data streaming service available on the Amazon cloud.

    Microservice-based architectures can be considered an industry trend and are thus often found in enterprise applications lately. One possible way to keep data synchronized across multiple services and their backing data stores is to make use of an approach called change data capture, or CDC for short.

    Essentially, CDC allows you to listen to any modifications which occur at one end of a data flow (i.e. the data source) and communicate them as change events to other interested parties or store them in a data sink. Instead of doing this in a point-to-point fashion, it’s advisable to decouple this flow of events between data sources and data sinks. Such a scenario can be implemented based on Debezium and Apache Kafka with relative ease and effectively no coding.

    As an example, consider the following microservice-based architecture of an order management system:

    \ No newline at end of file + Tag: examples

    Debezium Blog

    Debezium provides a way to run the connectors directly within Debezium from the very beginning of the project. The way how it was provided has changed over the time and it still evolves. This article will describe another evolution step in this regard - new implementation of Debezium engine.

    In the previous blog post, we have shown how to leverage Debezium to train neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored into the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning and to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to the changes in the data in real-time and avoids the need to re-train and re-deploy new models, thus saving the hardware and operational costs. As the streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

In this second installment, we will build on what we did in part one by deploying the Oracle connector using ZooKeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!
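
For orientation, registering such a connector against the Kafka Connect REST API could look roughly like the sketch below; the host names, credentials and table filter are hypothetical, and the connector documentation lists the full set of options:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RegisterOracleConnector {

        public static void main(String[] args) throws Exception {
            // Hypothetical connector registration; adjust hosts, credentials and
            // the PDB/table filters to match your environment.
            String config = """
                {
                  "name": "oracle-inventory",
                  "config": {
                    "connector.class": "io.debezium.connector.oracle.OracleConnector",
                    "database.hostname": "oracle-host",
                    "database.port": "1521",
                    "database.user": "c##dbzuser",
                    "database.password": "dbz",
                    "database.dbname": "ORCLCDB",
                    "database.pdb.name": "ORCLPDB1",
                    "topic.prefix": "server1",
                    "table.include.list": "DEBEZIUM.CUSTOMERS",
                    "schema.history.internal.kafka.bootstrap.servers": "kafka:9092",
                    "schema.history.internal.kafka.topic": "schema-changes.inventory"
                  }
                }
                """;

            HttpRequest request = HttpRequest.newBuilder(URI.create("http://connect:8083/connectors"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(config))
                    .build();

            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }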

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. The installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step, explaining why it is essential should you use a container image deployment.

Apache Kafka 2.8 allows for a first glimpse into the ZooKeeper-less future of the widely used event streaming platform: shipping with a preview of KIP-500 ("Replace ZooKeeper with a Self-Managed Metadata Quorum"), you can now run Kafka clusters without the need for setting up and operating Apache ZooKeeper. Not only does this simplify running Kafka from an operational perspective; the new metadata quorum implementation (named "KRaft", Kafka Raft metadata mode) should also provide much better scaling characteristics, for instance when it comes to large numbers of topics and partitions.

Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics, to make sure the same key is used on both sides of the join.

Thanks to KIP-213, this isn’t needed any longer, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically, in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.
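
As a small illustration of what such a foreign-key join can look like in the Kafka Streams DSL, consider the sketch below; the topic names, the value types and the combined AddressAndCustomer record are assumptions made for the example (real code would also configure appropriate serdes for the Debezium event payloads):

    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;

    public class CustomerAddressTopology {

        // Illustrative value types; real code would map the Debezium event structure instead.
        record Customer(long id, String firstName, String lastName) {}
        record Address(long id, long customerId, String street, String city) {}
        record AddressAndCustomer(Address address, Customer customer) {}

        public StreamsBuilder buildTopology() {
            StreamsBuilder builder = new StreamsBuilder();

            KTable<Long, Customer> customers = builder.table("dbserver1.inventory.customers");
            KTable<Long, Address> addresses = builder.table("dbserver1.inventory.addresses");

            // KIP-213 foreign-key join: no manual re-keying of the addresses topic is needed,
            // the customer id is extracted from the address value instead.
            KTable<Long, AddressAndCustomer> joined = addresses.join(
                    customers,
                    Address::customerId,          // foreign-key extractor
                    AddressAndCustomer::new);     // value joiner

            // A subsequent groupBy(customerId) + aggregate() step could fold the joined
            // records into one nested "customer with addresses" document per customer.
            return builder;
        }
    }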

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to get "read your own writes" semantics, while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
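
To sketch what wiring up the SMT involves, the snippet below shows the transformation-related settings expressed as Java properties; the outbox table name and the topic pattern are examples, and the remaining connector configuration is omitted:

    import java.util.Properties;

    public class OutboxRouterConfig {

        // Sketch of the transformation-related settings only; the usual connector
        // options (database coordinates, topic prefix, ...) must be added as well.
        public static Properties outboxSettings() {
            Properties props = new Properties();
            props.setProperty("table.include.list", "public.outbox");
            props.setProperty("transforms", "outbox");
            props.setProperty("transforms.outbox.type",
                    "io.debezium.transforms.outbox.EventRouter");
            // Route events to topics derived from the outbox aggregate type column,
            // e.g. outbox.event.order, outbox.event.customer, ...
            props.setProperty("transforms.outbox.route.topic.replacement",
                    "outbox.event.${routedByValue}");
            return props;
        }
    }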

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly on the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.
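
One of those strategies, making the TOAST-able columns part of the table’s replica identity, can be sketched as follows; the connection details and table name are placeholders, and the price to pay is larger WAL entries:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ReplicaIdentityFull {

        public static void main(String[] args) throws Exception {
            // With REPLICA IDENTITY FULL, Postgres writes all previous column values to the
            // WAL, so unchanged TOAST columns can be propagated in Debezium's change events.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("ALTER TABLE products REPLICA IDENTITY FULL");
            }
        }
    }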

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

    This is a guest post by Apache Pulsar PMC Member and Committer Jia Zhai.

Debezium is an open source project for change data capture (CDC). It is built on Apache Kafka Connect and supports multiple databases, such as MySQL, MongoDB, PostgreSQL, Oracle, and SQL Server. Apache Pulsar includes a set of built-in connectors based on the Pulsar IO framework, which is the counterpart to Apache Kafka Connect.

    As of version 2.3.0, Pulsar IO comes with support for the Debezium source connectors out of the box, so you can leverage Debezium to stream changes from your databases into Apache Pulsar. This tutorial walks you through setting up the Debezium connector for MySQL with Pulsar IO.

Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from best-of-breed Java libraries and standards, it allows you to build Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.
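
As a hedged sketch of the consuming side, a Quarkus service might receive those change events via SmallRye Reactive Messaging roughly like this; the channel name, the topic mapping and the payload handling are assumptions for the example:

    import jakarta.enterprise.context.ApplicationScoped;
    import org.eclipse.microprofile.reactive.messaging.Incoming;

    @ApplicationScoped
    public class OrderEventsConsumer {

        // The "orders" channel would be mapped to the Debezium change event topic in
        // application.properties, e.g.:
        //   mp.messaging.incoming.orders.connector=smallrye-kafka
        //   mp.messaging.incoming.orders.topic=order.events
        @Incoming("orders")
        public void onOrderEvent(String event) {
            // The payload is the change event value emitted by Debezium (JSON here);
            // deserialize it and update the service's local state accordingly.
            System.out.println("Received change event: " + event);
        }
    }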

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    The second-level cache of Hibernate ORM / JPA is a proven and efficient way to increase application performance: caching read-only or rarely modified entities avoids roundtrips to the database, resulting in improved response times of the application.

    Unlike the first-level cache, the second-level cache is associated with the session factory (or entity manager factory in JPA terms), so its contents are shared across transactions and concurrent sessions. Naturally, if a cached entity gets modified, the corresponding cache entry must be updated (or purged from the cache), too. As long as the data changes are done through Hibernate ORM, this is nothing to worry about: the ORM will update the cache automatically.

Things get tricky, though, when bypassing the application, e.g. when modifying records directly in the database. Hibernate ORM then has no way of knowing that the cached data has become stale, and it’s necessary to invalidate the affected items explicitly. A common way of doing so is to provide some admin functionality that allows clearing an application’s caches. For this to work, it’s vital not to forget to call that invalidation functionality, or the application will keep working with outdated cached data.

In the following we’re going to explore an alternative approach for cache invalidation, which works in a reliable and fully automated way: by employing Debezium and its change data capture (CDC) capabilities, you can track data changes in the database itself and react to any applied change. This allows invalidating affected cache entries in near real-time, without the risk of stale data due to missed changes. If an entry has been evicted from the cache, Hibernate ORM will load the latest version of the entity from the database the next time it is requested.
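
At its core, the reaction to a change event boils down to evicting the affected entity from the second-level cache; a minimal sketch could look like the following, where the entity type and the way the primary key is obtained from the event are placeholders:

    import jakarta.persistence.EntityManagerFactory;

    public class CacheInvalidator {

        private final EntityManagerFactory entityManagerFactory;

        public CacheInvalidator(EntityManagerFactory entityManagerFactory) {
            this.entityManagerFactory = entityManagerFactory;
        }

        // Called for every change event captured on the "customers" table; the id would be
        // extracted from the event's key or "after" state in real code.
        public void onCustomerChanged(long customerId) {
            // Evict the stale entry; Hibernate ORM reloads the entity from the database
            // the next time it is requested.
            entityManagerFactory.getCache().evict(Customer.class, customerId);
        }

        // Placeholder entity type for the example.
        public static class Customer {
        }
    }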

    Updating external full text search indexes (e.g. Elasticsearch) after data changes is a very popular use case for change data capture (CDC).

As we’ve discussed in a blog post a while ago, the combination of Debezium’s CDC source connectors and Confluent’s sink connector for Elasticsearch makes it straightforward to capture data changes in MySQL, Postgres etc. and push them towards Elasticsearch in near real-time. This results in a 1:1 relationship between tables in the source database and a corresponding search index in Elasticsearch, which is perfectly fine for many use cases.

    \ No newline at end of file diff --git a/tag/features/index.html b/tag/features/index.html index 92752baa0a..c877bf1e1e 100644 --- a/tag/features/index.html +++ b/tag/features/index.html @@ -1 +1 @@ - Tag: features

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.
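
For a rough idea of what enabling batching amounts to, the sketch below shows the relevant settings as Java properties; the values are illustrative, and the JDBC sink connector documentation remains the authoritative reference for option names and defaults:

    import java.util.Properties;

    public class JdbcSinkBatchConfig {

        // Illustrative subset of a Debezium JDBC sink connector configuration;
        // the connection settings and topic subscription are omitted.
        public static Properties batchSettings() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.jdbc.JdbcSinkConnector");
            props.setProperty("batch.size", "1000");
            // Letting Kafka hand over more records per poll gives the connector
            // larger batches to work with in the first place.
            props.setProperty("consumer.override.max.poll.records", "1000");
            return props;
        }
    }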

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    \ No newline at end of file + Tag: features

    \ No newline at end of file diff --git a/tag/fedora/index.html b/tag/fedora/index.html index 5e57f1e63e..5bba7350ac 100644 --- a/tag/fedora/index.html +++ b/tag/fedora/index.html @@ -1 +1 @@ - Tag: fedora

    Debezium Blog

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

This is true for all connectors except the Debezium PostgreSQL connector. This connector is specific in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins:

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

• wal2json, which is based on JSON and which is maintained by its own upstream community

    \ No newline at end of file + Tag: fedora

    \ No newline at end of file diff --git a/tag/flink/index.html b/tag/flink/index.html index 865d5596d6..f5a1b09a78 100644 --- a/tag/flink/index.html +++ b/tag/flink/index.html @@ -1 +1 @@ - Tag: flink

    Debezium Blog

From the very beginning of the project, Debezium has provided a way to run its connectors directly embedded within an application. How this is provided has changed over time and is still evolving. This article describes another evolution step in this regard: a new implementation of the Debezium engine.

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will move one step further: we will use Debezium to create multiple data streams from the database, using one of the streams for continuous learning to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool, as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    \ No newline at end of file + Tag: flink

    \ No newline at end of file diff --git a/tag/hiring/index.html b/tag/hiring/index.html index 1fbe0ef54f..b53ade3a4d 100644 --- a/tag/hiring/index.html +++ b/tag/hiring/index.html @@ -1 +1 @@ - Tag: hiring

    Debezium Blog

    In November last year, we announced we were looking for reinforcements for the team. And I have two pieces of news for you today: a good one and an even better one.

    As you are probably well aware, Gunnar Morling has stepped down from his position as Debezium project lead and is now pursuing new exciting adventures. It is sad, but every cloud has a silver lining!

    What can it be? We (the Debezium team and Red Hat) are hiring! Are you a community contributor? Do you have any pull requests under your belt? Are you a happy Debezium user and eager to do more, or are you a seasoned Java developer looking for work in an exciting and inclusive open-source environment?

    \ No newline at end of file + Tag: hiring

    \ No newline at end of file diff --git a/tag/ibmi/index.html b/tag/ibmi/index.html index 372b834f9b..c146f8316b 100644 --- a/tag/ibmi/index.html +++ b/tag/ibmi/index.html @@ -1 +1 @@ - Tag: ibmi

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure to announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    \ No newline at end of file + Tag: ibmi

    \ No newline at end of file diff --git a/tag/iceberg/index.html b/tag/iceberg/index.html index 632f52b1ab..ec673cbcc8 100644 --- a/tag/iceberg/index.html +++ b/tag/iceberg/index.html @@ -1 +1 @@ - Tag: iceberg

    Debezium Blog

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it doesn’t require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    \ No newline at end of file + Tag: iceberg

    \ No newline at end of file diff --git a/tag/images/index.html b/tag/images/index.html index d7df2bb63f..803a0d17bb 100644 --- a/tag/images/index.html +++ b/tag/images/index.html @@ -1 +1 @@ - Tag: images

    Debezium Blog

    The Debezium community is in the homestretch for the next major milestone, Debezium 3. We wanted to take this opportunity to remind the community of our plans regarding Debezium’s container images…​

    \ No newline at end of file + Tag: images

    \ No newline at end of file diff --git a/tag/index.html b/tag/index.html index 90c4ca54af..7db026f3cb 100644 --- a/tag/index.html +++ b/tag/index.html @@ -1 +1 @@ - Debezium Blog

    Debezium Blog

    \ No newline at end of file + Debezium Blog

    \ No newline at end of file diff --git a/tag/informix/index.html b/tag/informix/index.html index a67471b23b..0c0f2d617a 100644 --- a/tag/informix/index.html +++ b/tag/informix/index.html @@ -1 +1 @@ - Tag: informix

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure to announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors such as MongoDB, MariaDB, MySQL, Oracle, and Vitess, as well as the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues found in 2.6.0.Final, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team springing into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements, thanks to the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

\ No newline at end of file
+ Tag: informix

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, so let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues found in 2.6.1.Final and brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors such as MongoDB, MariaDB, MySQL, Oracle, and Vitess, as well as the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues found in 2.6.0.Final, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team springing into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements, thanks to the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

\ No newline at end of file
diff --git a/tag/integration/index.html b/tag/integration/index.html
index f26e2b6a8d..e884b0b3d7 100644
--- a/tag/integration/index.html
+++ b/tag/integration/index.html
@@ -1 +1 @@
- Tag: integration

    Debezium Blog

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

• Use the Debezium embedded engine in a standalone Java application and write the integration code in plain Java; this is often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub, etc.

    • Use an existing integration framework or service bus to express the pipeline logic

This article focuses on the third option, a dedicated integration framework.

\ No newline at end of file
+ Tag: integration

    Debezium Blog

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

• Use the Debezium embedded engine in a standalone Java application and write the integration code in plain Java (see the sketch after this list); this is often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub, etc.

    • Use an existing integration framework or service bus to express the pipeline logic
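To make the second option more concrete, here is a minimal, hypothetical sketch using the DebeziumEngine API (it assumes the PostgreSQL connector; the connection settings, topic prefix, and handler body are placeholders, and the exact set of required properties depends on the connector and Debezium version):

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class EmbeddedEngineSketch {

    public static void main(String[] args) throws Exception {
        // Illustrative placeholder configuration; required properties differ per connector and version.
        Properties props = new Properties();
        props.setProperty("name", "embedded-engine-sketch");
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("topic.prefix", "inventory");

        // The handler receives every captured change event; this is the place to forward
        // events to Kinesis, Google Pub/Sub, or any other downstream system.
        try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(event -> System.out.println(event.value()))
                .build()) {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            executor.execute(engine);
            // A real application would block until shutdown is requested.
            Thread.sleep(10_000);
            executor.shutdownNow();
        }
    }
}

With the Json format, keys and values arrive as JSON strings; other formats such as Avro or CloudEvents can be requested through the corresponding format class.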

This article focuses on the third option, a dedicated integration framework.

\ No newline at end of file
diff --git a/tag/introduction/index.html b/tag/introduction/index.html
index 502898dca7..e88556ae6e 100644
--- a/tag/introduction/index.html
+++ b/tag/introduction/index.html
@@ -1 +1 @@
- Tag: introduction

    Debezium Blog

    Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

The recording of that 30-minute session is available on YouTube now. It also contains a demo that shows how to set up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins 12 minutes and 40 seconds into the recording.

    Enjoy!

    Debezium’s project lead Gunnar Morling gave a few talks during recent Devoxx Belgium 2017. One of his talks was dedicated to Debezium and change data capture in general.

    If you are interested in those topics and you want to obtain a fast and simple introduction to it, do not hesitate and watch the talk. Batteries and demo included!

\ No newline at end of file
+ Tag: introduction

    Debezium Blog

    Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

The recording of that 30-minute session is available on YouTube now. It also contains a demo that shows how to set up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins 12 minutes and 40 seconds into the recording.

    Enjoy!

    Debezium’s project lead Gunnar Morling gave a few talks during recent Devoxx Belgium 2017. One of his talks was dedicated to Debezium and change data capture in general.

    If you are interested in those topics and you want to obtain a fast and simple introduction to it, do not hesitate and watch the talk. Batteries and demo included!

\ No newline at end of file
diff --git a/tag/jaeger/index.html b/tag/jaeger/index.html
index bda212aa6c..fa69ef2755 100644
--- a/tag/jaeger/index.html
+++ b/tag/jaeger/index.html
@@ -1 +1 @@
- Tag: jaeger

    Debezium Blog

The current pattern in application development gravitates toward microservices and microservice architectures. While this approach gives developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place, so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

\ No newline at end of file
+ Tag: jaeger

    Debezium Blog

The current pattern in application development gravitates toward microservices and microservice architectures. While this approach gives developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place, so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

\ No newline at end of file
diff --git a/tag/jdbc/index.html b/tag/jdbc/index.html
index 377b02c0e0..2f4649e40d 100644
--- a/tag/jdbc/index.html
+++ b/tag/jdbc/index.html
@@ -1 +1 @@
- Tag: jdbc

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, so let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues found in 2.6.1.Final and brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors such as MongoDB, MariaDB, MySQL, Oracle, and Vitess, as well as the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues found in 2.6.0.Final, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team springing into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements, thanks to the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that are being made that ensures that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features, understand how to use these to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

\ No newline at end of file
+ Tag: jdbc

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, so let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues found in 2.6.1.Final and brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors such as MongoDB, MariaDB, MySQL, Oracle, and Vitess, as well as the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues found in 2.6.0.Final, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to capture changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team springing into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements, thanks to the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that are being made that ensures that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features, understand how to use these to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.

\ No newline at end of file
diff --git a/tag/json/index.html b/tag/json/index.html
index 4f112054a8..908ccee5c5 100644
--- a/tag/json/index.html
+++ b/tag/json/index.html
@@ -1 +1 @@
- Tag: json

    Debezium Blog

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is a really powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

\ No newline at end of file
+ Tag: json

    Debezium Blog

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is possible to define generated columns whose values are computed from the JSON value in another column of the same table, and then to define indexes on those generated columns. Overall, this is a really powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.
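To illustrate what this string representation means on the consumer side, here is a small, hypothetical sketch that extracts and parses such a column with Jackson; the field names and the heavily simplified event shape are placeholders rather than the full Debezium change event envelope:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonColumnSketch {

    public static void main(String[] args) throws Exception {
        // A shortened, hypothetical change event value; the "metadata" column uses the
        // io.debezium.data.Json semantic type, so its value arrives as a plain JSON string.
        String eventValue = """
                {"after": {"id": 1, "metadata": "{\\"color\\":\\"red\\",\\"sizes\\":[1,2,3]}"}}
                """;

        ObjectMapper mapper = new ObjectMapper();
        JsonNode after = mapper.readTree(eventValue).get("after");

        // The column value is a string; parse it again to work with the JSON structure.
        JsonNode metadata = mapper.readTree(after.get("metadata").asText());
        System.out.println(metadata.get("color").asText());      // red
        System.out.println(metadata.get("sizes").get(2).asInt()); // 3
    }
}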

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

\ No newline at end of file
diff --git a/tag/kafka streams/index.html b/tag/kafka streams/index.html
index 726d292d55..ff7dc1e1c5 100644
--- a/tag/kafka streams/index.html
+++ b/tag/kafka streams/index.html
@@ -1 +1 @@
- Tag: kafka streams

    Debezium Blog

    Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as a map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics to make sure the same key is used on both sides of the join.

Thanks to KIP-213, this is no longer needed, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically and in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.

\ No newline at end of file
+ Tag: kafka streams

    Debezium Blog

    Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as a map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics to make sure the same key is used on both sides of the join.

Thanks to KIP-213, this is no longer needed, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically and in a fully transparent way. Compared to previous approaches, this drastically reduces the effort of creating aggregated events from Debezium’s CDC events.
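A minimal sketch of what such a foreign-key join can look like with the Kafka Streams DSL is shown below; the topic names and value types are hypothetical, serde configuration is omitted for brevity, and real Debezium topics would typically be unwrapped to plain row state first (for example with the ExtractNewRecordState transformation):

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;

public class ForeignKeyJoinSketch {

    // Simplified value types standing in for unwrapped Debezium row state.
    record Address(long id, long customerId, String city) {}
    record Customer(long id, String name) {}
    record AddressWithCustomer(Address address, Customer customer) {}

    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // One KTable per captured table; serdes omitted for brevity.
        KTable<Long, Address> addresses = builder.table("dbserver1.inventory.addresses");
        KTable<Long, Customer> customers = builder.table("dbserver1.inventory.customers");

        // KIP-213 foreign-key join: the second argument extracts the foreign key from the
        // address value, and Kafka Streams handles the required re-keying internally.
        KTable<Long, AddressWithCustomer> joined = addresses.join(
                customers,
                Address::customerId,
                AddressWithCustomer::new);

        joined.toStream().to("customers-with-addresses");
        // builder.build() would then be passed to new KafkaStreams(topology, props) and started.
    }
}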

\ No newline at end of file
diff --git a/tag/kafka-streams/index.html b/tag/kafka-streams/index.html
index 1e0f5e168f..5af3f8b45b 100644
--- a/tag/kafka-streams/index.html
+++ b/tag/kafka-streams/index.html
@@ -1 +1 @@
- Tag: kafka-streams

    Debezium Blog

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

\ No newline at end of file
+ Tag: kafka-streams

    Debezium Blog

    As a follow up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

    In the above mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.

    It is a common requirement for business applications to maintain some form of audit log, i.e. a persistent trail of all the changes to the application’s data. If you squint a bit, a Kafka topic with Debezium data change events is quite similar to that: sourced from database transaction logs, it describes all the changes to the records of an application. What’s missing though is some metadata: why, when and by whom was the data changed? In this post we’re going to explore how that metadata can be provided and exposed via change data capture (CDC), and how stream processing can be used to enrich the actual data change events with such metadata.

    \ No newline at end of file diff --git a/tag/kafka/index.html b/tag/kafka/index.html index ccef6fcbf2..cf9c811fe3 100644 --- a/tag/kafka/index.html +++ b/tag/kafka/index.html @@ -7,4 +7,4 @@ num.partitions = 1 compression.type = producer log.cleanup.policy = delete -log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, as KIP-158 was implemented to enable customizable topic creation with Kafka Connect.
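For illustration, the KIP-158 settings are plain connector configuration properties; the sketch below collects hypothetical values in a Java Properties object, as they might be added to a Debezium connector’s registration config (the worker additionally needs topic.creation.enable=true, which is the default as of 2.6.0). The group name, regex and sizing values are examples, not recommendations.

```java
import java.util.Properties;

public class TopicCreationConfig {

    // Sketch of KIP-158 topic creation settings for a Debezium connector, expressed as
    // Java Properties purely for illustration; these entries would normally be part of the
    // connector configuration submitted to Kafka Connect.
    public static Properties topicCreationSettings() {
        Properties config = new Properties();
        // Defaults applied to every topic the connector needs.
        config.put("topic.creation.default.replication.factor", "3");
        config.put("topic.creation.default.partitions", "10");
        config.put("topic.creation.default.cleanup.policy", "compact");
        // An additional group overriding the defaults for matching topics;
        // the group name and the regex are examples.
        config.put("topic.creation.groups", "highvolume");
        config.put("topic.creation.highvolume.include", "dbserver1\\.inventory\\.orders");
        config.put("topic.creation.highvolume.partitions", "20");
        return config;
    }
}
```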

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language-agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language-agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.
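As a rough sketch of what that additional setup looks like, the converter settings below switch Kafka Connect from JSON to Avro; they are shown as a Java Properties object purely for illustration and would normally live in the worker or connector configuration. The Confluent Avro converter and the schema registry URL are assumptions here; Apicurio Registry provides comparable converters.

```java
import java.util.Properties;

public class AvroConverterConfig {

    // Sketch of the serialization settings for using Avro instead of JSON.
    // These would typically go into the Kafka Connect worker config (or be
    // overridden per connector); the registry URL is a placeholder.
    public static Properties avroSerialization() {
        Properties config = new Properties();
        config.put("key.converter", "io.confluent.connect.avro.AvroConverter");
        config.put("value.converter", "io.confluent.connect.avro.AvroConverter");
        config.put("key.converter.schema.registry.url", "http://schema-registry:8081");
        config.put("value.converter.schema.registry.url", "http://schema-registry:8081");
        return config;
    }
}
```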

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file +log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case, you have to create topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, as KIP-158 was implemented to enable customizable topic creation with Kafka Connect.

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language-agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language-agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file diff --git a/tag/kogito/index.html b/tag/kogito/index.html index 2106f94354..24c71cdbac 100644 --- a/tag/kogito/index.html +++ b/tag/kogito/index.html @@ -1 +1 @@ - Tag: kogito

    Debezium Blog

As a follow-up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

In the above-mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    \ No newline at end of file + Tag: kogito

    Debezium Blog

As a follow-up to the recent Building Audit Logs with Change Data Capture and Stream Processing blog post, we’d like to extend the example with admin features to make it possible to capture and fix any missing transactional data.

In the above-mentioned blog post, there is a log enricher service used to combine data inserted or updated in the Vegetable database table with transaction context data such as

    • Transaction id

    • User name who performed the work

    • Use case that was behind the actual change e.g. "CREATE VEGETABLE"

    This all works well as long as all the changes are done via the vegetable service. But is this always the case?

What about maintenance activities or migration scripts executed directly at the database level? There are still a lot of such activities going on, either on purpose or because of old habits we are trying to change…

    \ No newline at end of file diff --git a/tag/ksql/index.html b/tag/ksql/index.html index ddb3c4bf89..0f9c11ae7e 100644 --- a/tag/ksql/index.html +++ b/tag/ksql/index.html @@ -1 +1 @@ - Tag: ksql

    Debezium Blog

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

Last year we saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

As a source of data, we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain-driven aggregates.

    \ No newline at end of file + Tag: ksql

    Debezium Blog

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

Last year we saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

As a source of data, we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain-driven aggregates.

    \ No newline at end of file diff --git a/tag/kubernetes/index.html b/tag/kubernetes/index.html index 2411a5f6f4..131a9013ef 100644 --- a/tag/kubernetes/index.html +++ b/tag/kubernetes/index.html @@ -1 +1 @@ - Tag: kubernetes

    Debezium Blog

Hello everyone, Jakub here. You may have noticed that there hasn’t been much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

With Debezium 2.3, we introduced a preview of a brand-new Debezium Operator with the aim of providing seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, full support of this component is approaching fast.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file + Tag: kubernetes

    Debezium Blog

Hello everyone, Jakub here. You may have noticed that there hasn’t been much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

With Debezium 2.3, we introduced a preview of a brand-new Debezium Operator with the aim of providing seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, full support of this component is approaching fast.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    \ No newline at end of file diff --git a/tag/lakehouse/index.html b/tag/lakehouse/index.html index 0084b08f78..b7a42a287f 100644 --- a/tag/lakehouse/index.html +++ b/tag/lakehouse/index.html @@ -1 +1 @@ - Tag: lakehouse

    Debezium Blog

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it doesn’t require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    \ No newline at end of file + Tag: lakehouse

    Debezium Blog

Today, it is a common practice to build data lakes for analytics, reporting or machine learning needs.

In this blog post we will describe a simple way to build a data lake. The solution uses a real-time data pipeline based on Debezium, supports ACID transactions and SQL updates, and is highly scalable. And it doesn’t require Apache Kafka or Apache Spark applications to build the data feed, reducing the complexity of the overall solution.

    \ No newline at end of file diff --git a/tag/machine-learning/index.html b/tag/machine-learning/index.html index 52fc081231..9fdc0068b7 100644 --- a/tag/machine-learning/index.html +++ b/tag/machine-learning/index.html @@ -1 +1 @@ - Tag: machine-learning

    Debezium Blog

Debezium has provided a way to run connectors directly embedded within an application from the very beginning of the project. The way this is provided has changed over time and it still evolves. This article describes another evolution step in this regard - a new implementation of the Debezium engine.
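For readers who haven’t used the embedded engine before, the DebeziumEngine builder API is used roughly as follows; this is a minimal sketch assuming the PostgreSQL connector and JSON-formatted change events, with all connection details and file paths as placeholders.

```java
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

public class EmbeddedEngineExample {

    public static void main(String[] args) {
        // Connector and connection settings are placeholders for illustration.
        Properties props = new Properties();
        props.put("name", "embedded-postgres");
        props.put("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.put("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.put("offset.storage.file.filename", "/tmp/offsets.dat");
        props.put("topic.prefix", "dbserver1");
        props.put("database.hostname", "localhost");
        props.put("database.port", "5432");
        props.put("database.user", "postgres");
        props.put("database.password", "postgres");
        props.put("database.dbname", "inventory");

        // The engine pushes change events to the handler instead of writing them to Kafka.
        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(props)
                .notifying(record -> System.out.println(record.value()))
                .build();

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);
        // engine.close() should be called on application shutdown (it implements Closeable).
    }
}
```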

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    \ No newline at end of file + Tag: machine-learning

    Debezium Blog

Debezium has provided a way to run connectors directly embedded within an application from the very beginning of the project. The way this is provided has changed over time and it still evolves. This article describes another evolution step in this regard - a new implementation of the Debezium engine.

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will move it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    \ No newline at end of file diff --git a/tag/mariadb/index.html b/tag/mariadb/index.html index 68c820101b..b4d423d2e9 100644 --- a/tag/mariadb/index.html +++ b/tag/mariadb/index.html @@ -1 +1 @@ - Tag: mariadb

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2!. Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    \ No newline at end of file + Tag: mariadb

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2!. Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    \ No newline at end of file diff --git a/tag/metrics/index.html b/tag/metrics/index.html index 4e7fce386a..5d31ed2cce 100644 --- a/tag/metrics/index.html +++ b/tag/metrics/index.html @@ -1 +1 @@ - Tag: metrics

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    \ No newline at end of file + Tag: metrics

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    \ No newline at end of file diff --git a/tag/microservices/index.html b/tag/microservices/index.html index 36c717bf7a..b2a263d62c 100644 --- a/tag/microservices/index.html +++ b/tag/microservices/index.html @@ -1 +1 @@ - Tag: microservices

    Debezium Blog

Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from the best-of-breed Java libraries and standards, it allows you to build Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.
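As a taste of what the consuming side can look like, here is a minimal sketch of a Quarkus bean receiving change events through SmallRye Reactive Messaging’s Kafka connector; the channel name, topic mapping and the plain String payload are illustrative assumptions, and the jakarta.* package names reflect current Quarkus versions rather than the ones available when this post was written.

```java
import jakarta.enterprise.context.ApplicationScoped;

import org.eclipse.microprofile.reactive.messaging.Incoming;

@ApplicationScoped
public class OrderEventsConsumer {

    // The channel name "orders" is an example; it would be mapped to the Debezium topic
    // in application.properties, e.g.:
    //   mp.messaging.incoming.orders.topic=dbserver1.inventory.orders
    @Incoming("orders")
    public void onOrderChanged(String changeEvent) {
        // The raw Debezium change event (JSON here) would typically be deserialized into a
        // typed representation before being applied to the service's local data store.
        System.out.println("Received change event: " + changeEvent);
    }
}
```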

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    \ No newline at end of file + Tag: microservices

    Debezium Blog

Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from the best-of-breed Java libraries and standards, it allows you to build Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.

    As part of their business logic, microservices often do not only have to update their own local data store, but they also need to notify other services about data changes that happened. The outbox pattern describes an approach for letting services execute these two tasks in a safe and consistent manner; it provides source services with instant "read your own writes" semantics, while offering reliable, eventually consistent data exchange across service boundaries.

    \ No newline at end of file diff --git a/tag/mongo/index.html b/tag/mongo/index.html index 13590442d0..72f87b26e1 100644 --- a/tag/mongo/index.html +++ b/tag/mongo/index.html @@ -1 +1 @@ - Tag: mongo

    Debezium Blog

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for topic automatic creation. Read further for more information, including a video demo!

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    \ No newline at end of file + Tag: mongo

    Debezium Blog

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for topic automatic creation. Read further for more information, including a video demo!

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    \ No newline at end of file diff --git a/tag/mongodb/index.html b/tag/mongodb/index.html index 86266b37ee..d8a43b5c91 100644 --- a/tag/mongodb/index.html +++ b/tag/mongodb/index.html @@ -1 +1 @@ - Tag: mongodb

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and a fix for a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, an issue where MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us and the summer spirit ahead, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes to ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While typically beta releases focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server, to name just a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability, and it is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability, and it is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, which include some of the following:

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

• Field renaming using the ExtractNewRecordState SMT’s add.fields and add.headers configurations (see the sketch below)
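
For illustration, here is a minimal sketch of how such a renaming could look in a connector configuration built programmatically; the "unwrap" alias and the chosen field and header names are assumptions for the example, not values from this announcement.

    import java.util.Properties;

    public class ExtractNewRecordStateRenameSketch {

        // Hypothetical connector configuration fragment; "unwrap" is simply the alias chosen here.
        public static Properties unwrapWithRenamedFields() {
            Properties props = new Properties();
            props.setProperty("transforms", "unwrap");
            props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
            // field:new_name pairs rename the metadata fields added to the flattened record
            props.setProperty("transforms.unwrap.add.fields", "op,source.ts_ms:event_timestamp");
            // headers can be renamed the same way
            props.setProperty("transforms.unwrap.add.headers", "db,source.lsn:commit_lsn");
            return props;
        }
    }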

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", allowing you to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

• A new SMT for filtering out change events using scripting languages (see the sketch below)

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
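
As an illustration of the scripting-based filtering mentioned above, the following sketch shows a possible connector configuration fragment; the "filter" alias and the Groovy expression are made-up examples, and a JSR-223 engine such as Groovy is assumed to be available on the connector's classpath.

    import java.util.Properties;

    public class FilterSmtSketch {

        // Hypothetical configuration fragment using the filter SMT to keep only update events.
        public static Properties keepOnlyUpdates() {
            Properties props = new Properties();
            props.setProperty("transforms", "filter");
            props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
            // A JSR-223 scripting engine (e.g. Groovy) must be on the classpath
            props.setProperty("transforms.filter.language", "jsr223.groovy");
            // Change events for which the expression evaluates to false are dropped
            props.setProperty("transforms.filter.condition", "value.op == 'u'");
            return props;
        }
    }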

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
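
For a rough idea of what such a Testcontainers-based test set-up can look like, here is a heavily abridged sketch; the class names from the debezium-testing-testcontainers module, the image versions, and the connector and server names are assumptions on our part and may differ from the module's actual API.

    import io.debezium.testing.testcontainers.ConnectorConfiguration;
    import io.debezium.testing.testcontainers.DebeziumContainer;
    import org.testcontainers.containers.KafkaContainer;
    import org.testcontainers.containers.Network;
    import org.testcontainers.containers.PostgreSQLContainer;

    public class CdcIntegrationTestSketch {

        public static void main(String[] args) {
            Network network = Network.newNetwork();

            KafkaContainer kafka = new KafkaContainer()
                    .withNetwork(network);

            PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11")
                    .withNetwork(network)
                    .withNetworkAliases("postgres");

            DebeziumContainer connect = new DebeziumContainer("1.1.0.Beta2")
                    .withNetwork(network)
                    .withKafka(kafka)
                    .dependsOn(kafka);

            kafka.start();
            postgres.start();
            connect.start();

            // Register a Postgres connector against the database container;
            // connector name and logical server name are placeholders
            ConnectorConfiguration config = ConnectorConfiguration
                    .forJdbcContainer(postgres)
                    .with("database.server.name", "dbserver1");
            connect.registerConnector("my-connector", config);

            // ... a real test would now consume from Kafka and assert on the emitted change events ...

            connect.stop();
            postgres.stop();
            kafka.stop();
        }
    }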

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

After the CR1 release we decided to do another candidate release, as not only a good number of bug fixes came in, but a few very useful feature implementations were also provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of deploying the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
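
To illustrate, a Postgres connector configuration only needs to select the plug-in by name; the sketch below is a minimal example, and the connection details and server name are placeholders rather than values from the post.

    import java.util.Properties;

    public class PgOutputConfigSketch {

        // Hypothetical helper assembling connector properties; host, credentials and
        // server name are made-up example values.
        public static Properties postgresConnectorProps() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            // Use the pgoutput plug-in that ships with PostgreSQL 10+, so no extra
            // logical decoding plug-in has to be installed on the database server
            props.setProperty("plugin.name", "pgoutput");
            props.setProperty("database.hostname", "postgres");
            props.setProperty("database.port", "5432");
            props.setProperty("database.user", "postgres");
            props.setProperty("database.password", "postgres");
            props.setProperty("database.dbname", "inventory");
            props.setProperty("database.server.name", "dbserver1");
            return props;
        }
    }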

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are also provided. The release contains 18 resolved issues overall.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More of a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
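
For reference, opting in is a single property on the MySQL connector configuration; the sketch below shows just that switch, with the connector class included only for context.

    import java.util.Properties;

    public class AntlrDdlParserOptIn {

        // Hypothetical configuration fragment; in 0.8 the legacy parser remains the default.
        public static Properties mySqlConnectorProps() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            // Opt in to the new Antlr-based DDL parser
            props.setProperty("ddl.parser.mode", "antlr");
            return props;
        }
    }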

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduces the risk of an internal race condition.

Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, that bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.
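
Selecting that decoder is a one-property change in the connector configuration, sketched below under the assumption that the rest of the Postgres connector configuration is already in place.

    import java.util.Map;

    public class Wal2JsonRdsSketch {

        // Hypothetical fragment of a Postgres connector configuration for Amazon RDS;
        // plugin.name is the only setting discussed above.
        public static Map<String, String> rdsDecoderSetting() {
            // Bypasses the plug-in auto-detection; intended for RDS instances only
            return Map.of("plugin.name", "wal2json_rds");
        }
    }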

    We have also gathered feedback from first tries to run with Amazon RDS and included a short section in our documentation on this topic.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

    Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    As the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

    Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connector processes them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
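
    As a hedged example of using one of the built-in transformations (the alias, regular expression, and target topic are made up for illustration), the following fragment of a Debezium connector configuration applies Kafka Connect’s RegexRouter SMT to collapse all change topics of one logical server into a single topic:

        "transforms": "route",
        "transforms.route.type": "org.apache.kafka.connect.transforms.RegexRouter",
        "transforms.route.regex": "dbserver1\\.inventory\\.(.*)",
        "transforms.route.replacement": "inventory-changes"

    Chaining several transformations is then just a matter of listing multiple aliases in the transforms property, e.g. "transforms": "route,mask"; they are applied in the given order.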

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

    Tag: mongodb


    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, a fix for Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, as well as a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we back-ported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes that address regressions and stability issues, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes that address regressions and stability issues, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes: MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed recently that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors which includes some of the following:

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
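
    A hedged sketch of how such a renaming could look (the unwrap alias and the chosen target names are purely illustrative, and the field:alias syntax is assumed here):

        "transforms": "unwrap",
        "transforms.unwrap.type": "io.debezium.transforms.ExtractNewRecordState",
        "transforms.unwrap.add.fields": "op:operation,source.ts_ms:event_timestamp",
        "transforms.unwrap.add.headers": "db:source_db,table:source_table"

    Without the colon-separated aliases the behavior stays as before, with the fields and headers added under their default names.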

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows you to propagate data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing you to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages (see the configuration sketch below)

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
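
    For instance, the new scripting-based filter SMT from the list above is applied like any other Kafka Connect transformation; a minimal sketch, assuming a JSR 223 Groovy implementation is available on the connector’s classpath and keeping only update events (the alias and the condition are illustrative):

        "transforms": "filter",
        "transforms.filter.type": "io.debezium.transforms.Filter",
        "transforms.filter.language": "jsr223.groovy",
        "transforms.filter.condition": "value.op == 'u'"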

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 or higher as a service offered by various cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
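
    A minimal sketch of opting into the new protocol (connection settings are placeholders); nothing needs to be installed on the database server beyond what PostgreSQL 10 and later already ship with:

        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "plugin.name": "pgoutput",
        "database.hostname": "postgres",
        "database.port": "5432",
        "database.user": "debezium",
        "database.password": "dbz",
        "database.dbname": "inventory",
        "database.server.name": "pg1"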

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumble upon materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.
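
    As a hedged illustration of the kind of misconfiguration this warning catches (database and table names are made up, using the MySQL connector’s filter options of that era), table filters must match the fully qualified <database>.<table> names:

        "database.whitelist": "inventory",
        "table.whitelist": "inventory.customers,inventory.orders"

    A value such as "customers,orders" would match nothing; before this change, the connector would have started up without capturing a single table and without any hint as to why.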

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase so that downstream consumers can react to it.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

• New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964); a configuration sketch follows below
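
To make those last options more concrete, here is a minimal sketch of how such a SQL Server connector could be registered through the Kafka Connect REST API. The Connect URL, connector name, host names, credentials and table filter are illustrative placeholders, and database.applicationName is just one example of a database.* property that would be passed through to the JDBC driver; only decimal.handling.mode and the pass-through mechanism themselves come from the announcement above.

```python
import json
import urllib.request

# Hedged sketch: register a SQL Server connector with the options mentioned
# above. All host names, credentials and table names are placeholders.
connector = {
    "name": "inventory-sqlserver",  # illustrative connector name
    "config": {
        "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector",
        "database.hostname": "sqlserver",          # placeholder
        "database.port": "1433",
        "database.user": "debezium",               # placeholder
        "database.password": "secret",             # placeholder
        "database.dbname": "testDB",               # placeholder
        "database.server.name": "server1",         # logical name used in topic names
        "table.whitelist": "dbo.customers",        # placeholder table filter
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "schema-changes.inventory",
        # New in this release: control how DECIMAL/NUMERIC values are emitted
        "decimal.handling.mode": "double",
        # database.* options are passed through to the JDBC driver, for example:
        "database.applicationName": "debezium-sqlserver",
    },
}

request = urllib.request.Request(
    "http://localhost:8083/connectors",            # Kafka Connect REST API (placeholder URL)
    data=json.dumps(connector).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
print(urllib.request.urlopen(request).read().decode())
```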

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering of fields in MongoDB change data messages and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We plan to make the new implementation the default in Debezium 0.9.
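
For illustration, opting in to the new parser on an already registered MySQL connector could look roughly like the sketch below, which replaces the connector configuration via the Kafka Connect REST API. The connector name inventory-connector, the Connect URL and all connection settings are assumptions made for the example; only the ddl.parser.mode option and its antlr value come from the text above.

```python
import json
import urllib.request

# Hedged sketch: switch an existing MySQL connector to the Antlr-based DDL
# parser. Connector name, hosts and credentials are placeholders.
config = {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",                  # placeholder
    "database.port": "3306",
    "database.user": "debezium",                   # placeholder
    "database.password": "dbz",                    # placeholder
    "database.server.id": "184054",
    "database.server.name": "dbserver1",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "ddl.parser.mode": "antlr",                    # opt in to the new DDL parser
}

request = urllib.request.Request(
    "http://localhost:8083/connectors/inventory-connector/config",
    data=json.dumps(config).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="PUT",                                  # PUT replaces the connector config
)
print(urllib.request.urlopen(request).read().decode())
```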

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduces the risk of an internal race condition.

Robert Coup has found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we have introduced a distinct plug-in decoder name, wal2json_rds, that bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

    We have also gathered feedback from first tries to run with Amazon RDS and included a short section in our documentation on this topic.
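
As a rough illustration, a Postgres connector configuration targeting RDS might select the decoder explicitly as in the sketch below; it would be submitted to Kafka Connect like any other connector. The endpoint, credentials and database names are placeholders, and only the plugin.name value wal2json_rds is taken from the text above.

```python
# Hedged sketch of a Postgres connector configuration for an Amazon RDS
# instance, selecting the new wal2json_rds decoder explicitly.
rds_connector = {
    "name": "rds-inventory",                        # illustrative connector name
    "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "plugin.name": "wal2json_rds",              # bypasses plug-in auto-detection; RDS only
        "database.hostname": "mydb.abc123.us-east-1.rds.amazonaws.com",  # placeholder
        "database.port": "5432",
        "database.user": "debezium",                # placeholder
        "database.password": "secret",              # placeholder
        "database.dbname": "inventory",             # placeholder
        "database.server.name": "rds1",             # logical name used in topic names
    },
}
```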

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, and upgrading is recommended for all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connector processes them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
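
To give a flavour of SMT chaining, the hedged sketch below shows a connector configuration fragment, written as a Python dict only for readability, that applies two of Kafka Connect’s built-in transformations: RegexRouter to prefix every destination topic, followed by InsertField to add a static field to each message value. The transform aliases, the cdc. prefix and the field name/value are made up for the example.

```python
# Hedged sketch: two chained built-in SMTs added to a connector configuration.
# The aliases "route" and "origin", the topic prefix and the field values are
# illustrative only.
smt_fragment = {
    "transforms": "route,origin",  # transformations are applied in this order
    "transforms.route.type": "org.apache.kafka.connect.transforms.RegexRouter",
    "transforms.route.regex": "(.*)",
    "transforms.route.replacement": "cdc.$1",  # prefix every topic with "cdc."
    "transforms.origin.type": "org.apache.kafka.connect.transforms.InsertField$Value",
    "transforms.origin.static.field": "origin",
    "transforms.origin.static.value": "debezium",
}
```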

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

    \ No newline at end of file diff --git a/tag/monitoring/index.html b/tag/monitoring/index.html index 4c08ff551c..78c07d1f21 100644 --- a/tag/monitoring/index.html +++ b/tag/monitoring/index.html @@ -1 +1 @@ - Tag: monitoring

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    \ No newline at end of file + Tag: monitoring

    Debezium Blog

    In today’s dynamic data environments, detecting and understanding data mutation patterns is critical for system reliability. In this blog post, we’ll explore how to use Debezium for comprehensive database activity logging and analysis in microservice architectures. We’ll delve into how Debezium captures row-level changes and streams them in real-time, enabling immediate visibility into database operations. By integrating with analytics tools, we’ll see how to build detailed activity dashboards that reveal the volume and nature of operations per table. These insights are invaluable for identifying unexpected patterns, such as a sudden drop in inserts caused by a new microservice deployment with a bug. You will learn how to set up Debezium, configure it for this specific use case, and utilize the generated data to create actionable dashboards.

    \ No newline at end of file diff --git a/tag/mysql/index.html b/tag/mysql/index.html index d1d21b5408..9eed0f4a59 100644 --- a/tag/mysql/index.html +++ b/tag/mysql/index.html @@ -1 +1 @@ - Tag: mysql

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable…​

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While typically beta releases focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes that address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth…​

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all of these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability, and it is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability, and it is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    The engineering team at Shopify recently improved the Debezium MySQL connector so that it supports incremental snapshotting for databases without write access by the connector, which is required when pointing Debezium to read-only replicas. In addition, the Debezium MySQL connector now also allows schema changes during an incremental snapshot. This blog post explains the implementation details of those features.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focused heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for topic automatic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new read-only incremental snapshot feature for MySQL.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed with tons of new features, including support for incremental snapshotting, which can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, Oracle BLOB/CLOB support is now opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support and Vitess SET type support, along with a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors which includes some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for emitting snapshot records as either create or read operations

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode. This mode should preferably be used, and for Debezium 1.3 we’re planning for it to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API (see the sketch below)

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
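    To illustrate the embedded engine support for SMTs listed above, here is a minimal, hypothetical sketch; it assumes the debezium-api, debezium-embedded and Postgres connector artifacts on the classpath, all connection values are placeholders, and the transform applied is Debezium’s ExtractNewRecordState SMT.

        import java.util.Properties;
        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        public class EmbeddedEngineWithSmt {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.setProperty("name", "embedded-postgres");
                props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
                props.setProperty("offset.flush.interval.ms", "10000");
                // Placeholder connection settings; pgoutput is the logical decoding plug-in
                // available out of the box since PostgreSQL 10.
                props.setProperty("database.hostname", "localhost");
                props.setProperty("database.port", "5432");
                props.setProperty("database.user", "postgres");
                props.setProperty("database.password", "postgres");
                props.setProperty("database.dbname", "inventory");
                props.setProperty("database.server.name", "dbserver1");
                props.setProperty("plugin.name", "pgoutput");
                // SMTs are configured exactly like in Kafka Connect, via the transforms properties.
                props.setProperty("transforms", "unwrap");
                props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");

                DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                        .using(props)
                        .notifying(record -> System.out.println(record.key() + " -> " + record.value()))
                        .build();

                // Runs until engine.close() is called; shut down the executor on application exit.
                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);
            }
        }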

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synced to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    When a Debezium connector is deployed to a Kafka Connect instance it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

    Let’s recall what a connector registration request looks like for the Debezium MySQL connector:
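    The following sketch is purely illustrative rather than the request from the original post; it assumes a Kafka Connect instance listening on localhost:8083 and uses placeholder connection values. The point to note is the plain-text database.password in the request body, which is exactly the kind of credential one may want to keep hidden.

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class RegisterMySqlConnector {
            public static void main(String[] args) throws Exception {
                // Connector configuration as sent to Kafka Connect's REST API;
                // all host names and credentials are placeholders.
                String registrationRequest = """
                    {
                      "name": "inventory-connector",
                      "config": {
                        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
                        "database.hostname": "mysql",
                        "database.port": "3306",
                        "database.user": "debezium",
                        "database.password": "dbz",
                        "database.server.id": "184054",
                        "database.server.name": "dbserver1",
                        "database.include.list": "inventory",
                        "database.history.kafka.bootstrap.servers": "kafka:9092",
                        "database.history.kafka.topic": "schema-changes.inventory"
                      }
                    }
                    """;

                HttpRequest request = HttpRequest.newBuilder()
                        .uri(URI.create("http://localhost:8083/connectors"))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(registrationRequest))
                        .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                        .send(request, HttpResponse.BodyHandlers.ofString());
                System.out.println(response.statusCode() + " " + response.body());
            }
        }

    With externalized secrets, the literal password above would be replaced by a configuration provider placeholder, so that the secret never appears via the Connect API.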

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as not only was there a good number of bug fixes coming in, but a few very useful feature implementations were also provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors and, in turn, reduced maintenance effort for the development team going forward; but there’s also one immediately tangible advantage for you: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 or higher as a service offered by the different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in required to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, the connector took 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

    The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

    Last year we saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

    As a source of data we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain driven aggregates.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    A user of the Debezium connector for MySQL informed us about a potential issue with the configuration of the connector’s internal database history topic, which may cause the deletion of parts of that topic (DBZ-663). Please continue reading if you’re using the Debezium MySQL connector in versions 0.7.3 or 0.7.4.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

    This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

    Now let’s take a closer look at some of the new features.

    We wish all the best to the Debezium community for 2018!

    While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server to leverage its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we can optimize access to the data via the SQL query language as well as via full-text search.

    Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduced the risk of an internal race condition.

    Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

    Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, that bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

    We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section in our documentation on this topic.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

    Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.
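    As a rough, illustrative sketch (not the configuration from the post itself), the JDBC sink side of such a pipeline could be registered as follows; all hosts and credentials are placeholders, and the flattening transform is referenced here by the class name io.debezium.transforms.UnwrapFromEnvelope used in the Debezium 0.6/0.7 timeframe (later superseded by ExtractNewRecordState).

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class RegisterJdbcSink {
            public static void main(String[] args) throws Exception {
                // Flatten Debezium's change event envelope so the JDBC sink
                // receives plain row state; names and URLs are placeholders.
                String sinkConfig = """
                    {
                      "name": "jdbc-sink-customers",
                      "config": {
                        "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
                        "topics": "dbserver1.inventory.customers",
                        "connection.url": "jdbc:postgresql://postgres:5432/inventory?user=postgres&password=postgres",
                        "auto.create": "true",
                        "insert.mode": "upsert",
                        "pk.mode": "record_key",
                        "transforms": "unwrap",
                        "transforms.unwrap.type": "io.debezium.transforms.UnwrapFromEnvelope"
                      }
                    }
                    """;

                HttpRequest request = HttpRequest.newBuilder()
                        .uri(URI.create("http://localhost:8083/connectors"))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(sinkConfig))
                        .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                        .send(request, HttpResponse.BodyHandlers.ofString());
                System.out.println(response.statusCode() + " " + response.body());
            }
        }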

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    As the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

    Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
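    As a toy illustration of such a custom SMT (not code from the original post, and with made-up class and option names), the following transformation routes every record to one configurable topic instead of the per-table default:

        import java.util.Map;

        import org.apache.kafka.common.config.ConfigDef;
        import org.apache.kafka.connect.connector.ConnectRecord;
        import org.apache.kafka.connect.transforms.Transformation;

        public class RouteToSingleTopic<R extends ConnectRecord<R>> implements Transformation<R> {

            private String targetTopic;

            @Override
            public void configure(Map<String, ?> configs) {
                Object configured = configs.get("target.topic");
                targetTopic = configured != null ? configured.toString() : "all-changes";
            }

            @Override
            public R apply(R record) {
                // Keep key, value and schemas untouched; only the destination topic changes.
                return record.newRecord(targetTopic, record.kafkaPartition(),
                        record.keySchema(), record.key(),
                        record.valueSchema(), record.value(),
                        record.timestamp());
            }

            @Override
            public ConfigDef config() {
                return new ConfigDef().define("target.topic", ConfigDef.Type.STRING,
                        "all-changes", ConfigDef.Importance.MEDIUM, "Topic to route all records to");
            }

            @Override
            public void close() {
            }
        }

    Packaged as a JAR on the Kafka Connect classpath, it would be enabled with transforms=reroute and transforms.reroute.type set to the fully-qualified class name.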

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    This post originally appeared on the WePay Engineering blog.

    Change data capture has been around for a while, but some recent developments in technology have given it new life. Notably, using Kafka as a backbone to stream your database data in realtime has become increasingly common.

    If you’re wondering why you might want to stream database changes into Kafka, I highly suggest reading The Hardest Part About Microservices: Your Data. At WePay, we wanted to integrate our microservices and downstream datastores with each other, so every system could get access to the data that it needed. We use Kafka as our data integration layer, so we needed a way to get our database data into it.

    Last year, Yelp’s engineering team published an excellent series of posts on their data pipeline. These included a discussion on how they stream MySQL data into Kafka. Their architecture involves a series of homegrown pieces of software to accomplish the task, notably schematizer and MySQL streamer. The write-up triggered a thoughtful post on Debezium’s blog about a proposed equivalent architecture using Kafka connect, Debezium, and Confluent’s schema registry. This proposed architecture is what we’ve been implementing at WePay, and this post describes how we leverage Debezium and Kafka connect to stream our MySQL databases into Kafka.

    We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL connector and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use with multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector may stop without completing all updates in a transaction, and when the connector restarts it starts with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

    MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is possible to define generated columns on tables whose values are computed from the JSON value in another column of the same table, and to then define indexes with those generated columns. Overall, this is really a very powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.
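
    If you ever need to perform the same conversion yourself, the donated parser can be invoked directly from the binlog client library. The sketch below is an assumption-heavy illustration: it presumes the library exposes a JsonBinary helper with a parseAsString method (class and package names quoted from memory, so verify them against the version you use), which turns the binary column bytes from a binlog row event into the textual JSON shown in change events.

    ```java
    // Assumption: the binlog client library (mysql-binlog-connector-java) exposes the donated
    // parser as JsonBinary.parseAsString(byte[]); check the package and method in your version.
    import java.io.IOException;

    import com.github.shyiko.mysql.binlog.event.deserialization.json.JsonBinary;

    public class BinaryJsonExample {

        /** Converts the raw binary JSON bytes of a binlog row event into their textual JSON form. */
        public static String toJsonText(byte[] binaryJson) throws IOException {
            return JsonBinary.parseAsString(binaryJson);
        }
    }
    ```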

    We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

    I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    Change data capture is a hot topic. Debezium’s goal is to make change data capture easy for multiple DBMSes, but admittedly we’re still a young open source project and so far we’ve only released a connector for MySQL with a connector for MongoDB that’s just around the corner. So it’s great to see how others are using and implementing change data capture. In this post, we’ll review Yelp’s approach and see how it is strikingly similar to Debezium’s MySQL connector.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

    I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that can produce change events with incorrect source metadata, and it eliminates the possibility that a poorly-timed connector crash causes the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

    Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just automate running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

    Kubernetes is written in Go-lang and is quickly becoming the de-facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.

    Parsing DDL of MySQL or any other major relational database can seem to be a daunting task. Usually each DBMS has a highly-customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter-weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.
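
    To give a feel for that embedded mode, here is a minimal sketch using the DebeziumEngine API from more recent Debezium releases (the 0.1-era entry point was a different class, io.debezium.embedded.EmbeddedEngine, and several property names have changed since then); the connection settings and file locations are placeholders, not a recommended configuration.

    ```java
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    public class EmbeddedMySqlExample {

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("name", "embedded-mysql");
            props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            // The application keeps offsets and schema history itself, since there is no Kafka Connect cluster.
            props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
            props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
            props.setProperty("offset.flush.interval.ms", "60000");
            props.setProperty("schema.history.internal", "io.debezium.storage.file.history.FileSchemaHistory");
            props.setProperty("schema.history.internal.file.filename", "/tmp/schema-history.dat");
            // Placeholder connection settings; adjust for your environment.
            props.setProperty("database.hostname", "localhost");
            props.setProperty("database.port", "3306");
            props.setProperty("database.user", "debezium");
            props.setProperty("database.password", "dbz");
            props.setProperty("database.server.id", "184054");
            props.setProperty("topic.prefix", "dbserver1");

            ExecutorService executor = Executors.newSingleThreadExecutor();
            try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(event -> System.out.println(event.value()))   // handle each change event as JSON
                    .build()) {
                executor.execute(engine);
                Thread.sleep(60_000);   // let the engine stream changes for a minute in this demo
            }
            executor.shutdown();
        }
    }
    ```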

    Tag: mysql


    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes addressing regressions and stability, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes addressing regressions and stability, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes, MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, as well as major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    The engineering team at Shopify recently improved the Debezium MySQL connector so that it supports incremental snapshotting for databases without write access by the connector, which is required when pointing Debezium to read-only replicas. In addition, the Debezium MySQL connector now also allows schema changes during an incremental snapshot. This blog post explains the implementation details of those features.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focuses heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for topic automatic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

    Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements but most notably is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors which includes some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback by the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and to introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, no fewer than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, no fewer than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

Overall, the community fixed no fewer than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

• The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshot mode. This mode is the preferred choice, and for Debezium 1.3 we plan to make it the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime for propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", which allows anonymizing column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime that opens up Debezium’s open-source change data capture capabilities to messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

• A new column masking mode using consistent hash values (see the configuration sketch below)
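As a hypothetical configuration sketch (the hash algorithm, salt, and column list are placeholders), the mode is enabled per set of columns via a property of the form column.mask.hash.<hashAlgorithm>.with.salt.<salt>:

    "column.mask.hash.SHA-256.with.salt.CzQMA0cB5K": "inventory.customers.email,inventory.customers.phone_number"

Values of the listed columns are then replaced in emitted change events by their salted hash, so identical inputs still map to identical outputs and remain correlatable.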

Overall, the community fixed no fewer than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synced to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    When a Debezium connector is deployed to a Kafka Connect instance it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

Let’s recall what a connector registration request looks like for the Debezium MySQL connector:
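For illustration, here is a minimal sketch of such a request, loosely following the Debezium tutorial; the hostnames, credentials, and server name are placeholders. Note how the database credentials appear in plain text, which is exactly what externalizing secrets helps to avoid:

    {
      "name": "inventory-connector",
      "config": {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "tasks.max": "1",
        "database.hostname": "mysql",
        "database.port": "3306",
        "database.user": "debezium",
        "database.password": "dbz",
        "database.server.id": "184054",
        "database.server.name": "dbserver1",
        "database.whitelist": "inventory",
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "schema-changes.inventory"
      }
    }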

Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

• Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

• Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our effort of rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors and in turn reduced maintenance effort for the development team going forward; but there’s one immediately tangible advantage for you, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 or higher as a service offered by the different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out-of-the-box since PostgreSQL 10.
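As a rough sketch (connection details and the server name are placeholders), switching a Postgres connector to the new protocol is a matter of setting the plugin.name option:

    {
      "name": "inventory-connector",
      "config": {
        "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
        "plugin.name": "pgoutput",
        "database.hostname": "postgres",
        "database.port": "5432",
        "database.user": "postgres",
        "database.password": "postgres",
        "database.dbname": "inventory",
        "database.server.name": "dbserver1"
      }
    }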

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble upon materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended" but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one of the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, it also provides a few new features. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features, too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no fewer than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout-out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will now be handled correctly by the new Antlr-based DDL parser (DBZ-860). Also, the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
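For reference, opting into the new parser is a one-line configuration change on the connector; the fragment below is just an illustrative sketch, with all other options left as they are:

    {
      "config": {
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "ddl.parser.mode": "antlr"
      }
    }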

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    Last updated at Nov 21st 2018 (adjusted to new KSQL Docker images).

Last year we saw the inception of a new open-source project in the Apache Kafka universe, KSQL, which is a streaming SQL engine built on top of Kafka Streams. In this post, we are going to try out KSQL querying with data change events generated by Debezium from a MySQL database.

    As a source of data we will use the database and setup from our tutorial. The result of this exercise should be similar to the recent post about aggregation of events into domain driven aggregates.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    A user of the Debezium connector for MySQL informed us about a potential issue with the configuration of the connector’s internal database history topic, which may cause the deletion of parts of that topic (DBZ-663). Please continue reading if you’re using the Debezium MySQL connector in versions 0.7.3 or 0.7.4.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

    We wish all the best to the Debezium community for 2018!

While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. We saw how to set up a change data stream to a downstream database a few weeks ago. In this blog post we will follow the same approach to stream the data to an Elasticsearch server to leverage its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we will optimize access to the data via the SQL query language as well as via full-text search.

Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduced the risk of an internal race condition.

Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of changes as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.
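As a rough sketch of the sink side of such a pipeline (the topic name, connection URL, and credentials are placeholders), the Confluent JDBC sink connector can be registered with the flattening SMT applied, so that plain row state rather than the full Debezium change event envelope is written to PostgreSQL; the SMT class name shown is the one used in the Debezium 0.6.x era:

    {
      "name": "jdbc-sink",
      "config": {
        "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
        "tasks.max": "1",
        "topics": "customers",
        "connection.url": "jdbc:postgresql://postgres:5432/inventory?user=postgresuser&password=postgrespw",
        "transforms": "unwrap",
        "transforms.unwrap.type": "io.debezium.transforms.UnwrapFromEnvelope",
        "auto.create": "true",
        "insert.mode": "upsert",
        "pk.mode": "record_value",
        "pk.fields": "id"
      }
    }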

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    As the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, upgrading is recommended to all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms (SMTs), and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
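As a small hypothetical sketch of such chaining (the aliases, regex, and field values are made up for illustration), a Debezium connector configuration could combine two of the built-in SMTs, first re-routing events to a topic named after just the table and then adding a static field to every record value:

    "transforms": "route,origin",
    "transforms.route.type": "org.apache.kafka.connect.transforms.RegexRouter",
    "transforms.route.regex": "([^.]+)\\.([^.]+)\\.([^.]+)",
    "transforms.route.replacement": "$3",
    "transforms.origin.type": "org.apache.kafka.connect.transforms.InsertField$Value",
    "transforms.origin.static.field": "origin",
    "transforms.origin.static.value": "debezium"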

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    This post originally appeared on the WePay Engineering blog.

    Change data capture has been around for a while, but some recent developments in technology have given it new life. Notably, using Kafka as a backbone to stream your database data in realtime has become increasingly common.

    If you’re wondering why you might want to stream database changes into Kafka, I highly suggest reading The Hardest Part About Microservices: Your Data. At WePay, we wanted to integrate our microservices and downstream datastores with each other, so every system could get access to the data that it needed. We use Kafka as our data integration layer, so we needed a way to get our database data into it.

Last year, Yelp’s engineering team published an excellent series of posts on their data pipeline. These included a discussion on how they stream MySQL data into Kafka. Their architecture involves a series of homegrown pieces of software to accomplish the task, notably schematizer and MySQL streamer. The write-up triggered a thoughtful post on Debezium’s blog about a proposed equivalent architecture using Kafka Connect, Debezium, and Confluent’s schema registry. This proposed architecture is what we’ve been implementing at WePay, and this post describes how we leverage Debezium and Kafka Connect to stream our MySQL databases into Kafka.

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use with multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector may stop without completing all updates in a transaction, and when the connector restarts it starts with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

MySQL 5.7 introduced a new data type for storing and working with JSON data. Clients can define tables with columns using the new JSON datatype, and they can store and read JSON data using SQL statements and new built-in JSON functions to construct JSON data from other relational columns, introspect the structure of JSON values, and search within and manipulate JSON data. It is possible to define generated columns on tables whose values are computed from the JSON value in another column of the same table, and to then define indexes with those generated columns. Overall, this is really a very powerful feature in MySQL.

    Debezium’s MySQL connector will support the JSON datatype starting with the upcoming 0.3.4 release. JSON document, array, and scalar values will appear in change events as strings with io.debezium.data.json for the schema name. This will make it natural for consumers to work with JSON data. BTW, this is the same semantic schema type used by the MongoDB connector to represent JSON data.
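As an illustrative sketch (the column name and document are made up), such a value would appear in the payload of a change event simply as a string, e.g. "profile": "{\"interests\": [\"climbing\", \"reading\"]}", while the corresponding entry in the event’s schema carries the semantic type name:

    {
      "type": "string",
      "optional": true,
      "name": "io.debezium.data.json",
      "field": "profile"
    }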

    This sounds straightforward, and we hope it is. But implementing this required a fair amount of work. That’s because although MySQL exposes JSON data as strings to client applications, internally it stores all JSON data in a special binary form that allows the MySQL engine to efficiently access the JSON data in queries, JSON functions and generated columns. All JSON data appears in the binlog in this binary form as well, which meant that we had to parse the binary form ourselves if we wanted to extract the more useful string representation. Writing and testing this parser took a bit of time and effort, and ultimately we donated it to the excellent MySQL binlog client library that the connector uses internally to read the binlog events.

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

Change data capture is a hot topic. Debezium’s goal is to make change data capture easy for multiple DBMSes, but admittedly we’re still a young open-source project, and so far we’ve only released a connector for MySQL, with a connector for MongoDB just around the corner. So it’s great to see how others are using and implementing change data capture. In this post, we’ll review Yelp’s approach and see how it is strikingly similar to Debezium’s MySQL connector.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and it eliminates the possibility of a poorly-timed connector crash causing the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just be automating running all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

Kubernetes is written in Go and is quickly becoming the de facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.
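
As a loose illustration of that idea (this is not Debezium’s actual implementation; the class and method names below are invented), the sketch keeps a history of a table’s columns keyed by binlog position, so a row event can be interpreted with the schema that was in effect at that position:

import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

// Hypothetical sketch, not Debezium's real classes: track how a table's column
// list evolves as DDL statements are observed at increasing binlog positions,
// and resolve the schema that applies to a row event at a given position.
public class TableSchemaHistory {
    private final NavigableMap<Long, List<String>> columnsByPosition = new TreeMap<>();

    // Called whenever a DDL statement affecting this table is read from the binlog.
    public void recordDdl(long binlogPosition, List<String> columnsAfterDdl) {
        columnsByPosition.put(binlogPosition, List.copyOf(columnsAfterDdl));
    }

    // Called for each row-level event: returns the schema in effect at that position.
    public List<String> schemaAt(long binlogPosition) {
        var entry = columnsByPosition.floorEntry(binlogPosition);
        return entry != null ? entry.getValue() : List.of();
    }

    public static void main(String[] args) {
        TableSchemaHistory history = new TableSchemaHistory();
        history.recordDdl(100, List.of("id", "name"));
        history.recordDdl(500, List.of("id", "name", "email")); // ALTER TABLE ... ADD email
        System.out.println(history.schemaAt(250)); // [id, name]        -> pre-ALTER schema
        System.out.println(history.schemaAt(800)); // [id, name, email] -> post-ALTER schema
    }
}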

Parsing the DDL of MySQL or any other major relational database can seem to be a daunting task. Usually each DBMS has a highly customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

• A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter-weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence it is not as fault tolerant or reliable, since the application must maintain state that is normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all of that state; a minimal sketch of this embedded usage follows below.
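
Here is a minimal sketch of that embedded usage, written against the present-day io.debezium.engine.DebeziumEngine API; class names and configuration have evolved since the 0.1 release described here, and the connection settings below are placeholders with several required properties omitted for brevity.

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

// Sketch of embedding a Debezium connector inside an application, without Kafka
// or Kafka Connect. Hostnames, credentials, and file paths are placeholders,
// and the configuration is abbreviated.
public class EmbeddedEngineSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("name", "embedded-mysql");
        props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "3306");
        props.setProperty("database.user", "debezium");
        props.setProperty("database.password", "dbz");
        // The application, not Kafka, is responsible for persisting offsets.
        props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
        props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");

        ExecutorService executor = Executors.newSingleThreadExecutor();
        try (DebeziumEngine<ChangeEvent<String, String>> engine =
                DebeziumEngine.create(Json.class)
                        .using(props)
                        .notifying(event -> System.out.println(event.value()))
                        .build()) {
            executor.execute(engine);  // the engine runs until it is closed
            Thread.sleep(60_000);      // stream changes for a while, then close
        } finally {
            executor.shutdown();
        }
    }
}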

diff --git a/tag/news/index.html b/tag/news/index.html

num.partitions = 1
compression.type = producer
log.cleanup.policy = delete
log.retention.ms = 604800000 ## 7 days

But often, when you use Debezium and Kafka in a production environment, you might choose to disable Kafka’s topic auto-creation capability with auto.create.topics.enable = false, or you may want the connector topics to be configured differently from the defaults. In that case you have to create the topics for Debezium’s captured data sources upfront.
But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, since KIP-158 enables customizable topic creation with Kafka Connect.
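
As a rough sketch of what that looks like, the snippet below adds KIP-158 style topic.creation.* settings to a source connector configuration; the property names follow the Kafka Connect documentation for version 2.6 and later, while the connector name, include pattern, and values are made up for illustration.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: connector configuration fragment showing KIP-158 topic
// creation settings. The connector name, include pattern, and values are
// illustrative only.
public class TopicCreationConfigSketch {
    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>();
        config.put("name", "inventory-connector");
        config.put("connector.class", "io.debezium.connector.mysql.MySqlConnector");

        // Defaults applied to every topic this connector creates.
        config.put("topic.creation.default.replication.factor", "3");
        config.put("topic.creation.default.partitions", "10");

        // An additional group of topics that should be log-compacted.
        config.put("topic.creation.groups", "compacted");
        config.put("topic.creation.compacted.include", "dbserver1\\.inventory\\..*");
        config.put("topic.creation.compacted.replication.factor", "3");
        config.put("topic.creation.compacted.partitions", "1");
        config.put("topic.creation.compacted.cleanup.policy", "compact");

        config.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}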

    Hello everyone, my name is René Kerner and I recently joined Red Hat and the Debezium team.

I had been working at trivago since 2011, and in 2016 we started using Debezium at version 0.4/0.5 for capturing clickstreams from the offshore datacenters into Kafka and aggregating them in the central cluster. We really intensified our Debezium usage within one year, and in 2017 we also used it for trivago’s main data.

In 2014 I made my first OSS contributions to Composer, PHP’s dependency manager, and gave my first talk on it at the Developer Conference (called code.talks for many years now). Then in 2017 I made my first contributions to Debezium, working on the MySQL snapshot process and fixing a MySQL TIME data type issue.

    In 2018 I left trivago and started working at Codecentric as a consultant for software architecture and development (mainly JVM focus) and Apache Kafka, doing many trainings and workshops at German "Fortune 500" companies (insurances, industrial sector, media). I was doing lots of networking at that time, where I learned how awesome the community around Kafka is. I was always quite sad I didn’t have more time to focus on OSS projects.

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    Welcome to the Debezium community newsletter in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10 but we have unveiled some recent changes to debezium.io.

    Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    Hello everyone, my name is Chris Cranford and I recently joined the Debezium team.

My journey at Red Hat began just over three years ago; however, I have been in this line of work for nearly twenty years. All throughout my career, I have advocated and supported open source software. Many of my initial software endeavors were based on open source software, several of which are still heavily used today, such as Hibernate ORM.

    When I first learned about the Debezium project last year, I was very excited about it right away.

I could see how this project would be very useful for many people out there, and I was very impressed by the professional way it was set up: a solid architecture for change data capture based on Apache Kafka, a strong focus on robustness and correctness also in the case of failures, and the overall idea of creating a diverse ecosystem of CDC connectors. All that based on the principles of open source, combined with extensive documentation from day one, a friendly and welcoming website, and a great getting-started experience.

So you can imagine that I was more than enthusiastic about the opportunity to take over the role of Debezium’s project lead. Debezium and CDC have close links to some data-centric projects I’ve previously been working on and also tie in with ideas I’ve been pursuing around CQRS, event sourcing, and denormalization. As a core member of the Hibernate team at Red Hat, I’ve implemented the initial Elasticsearch support for Hibernate Search (which deals with full-text index updates via JPA/Hibernate). I’ve also contributed to Hibernate OGM - a project which connects JPA and the world of NoSQL. One of the plans for OGM is to create a declarative denormalization engine for creating read models optimized for specific use cases. It will be very interesting to see how this plays together with the capabilities provided by Debezium.

Just before I started the Debezium project in early 2016, Martin Kleppmann gave several presentations about turning the database inside out and how his Bottled Water project demonstrated the important role that change data capture can play in using Kafka for stream processing. Then Kafka Connect was announced, and at that point it seemed obvious to me that Kafka Connect was the foundation upon which practical and reusable change data capture could be built. As these techniques and technologies were becoming more important to Red Hat, I was given the opportunity to start a new open source project and community around building great CDC connectors for a variety of database management systems.

Over the past few years, we have created Kafka Connect connectors for MySQL, then MongoDB, and most recently PostgreSQL. Each was initially limited and had a number of problems and issues, but over time more and more people have tried the connectors, asked questions, answered questions, mentioned Debezium on Twitter, tested connectors in their own environments, reported problems, fixed bugs, discussed limitations and potential new features, implemented enhancements and new features, improved the documentation, and written blog posts. Simply put, people with similar needs and interests have worked together and have formed a community. Additional connectors for Oracle and SQL Server are in the works, but could use some help to move things along more quickly.

It’s really exciting to see how far we’ve come and how the Debezium community continues to evolve and grow. And it’s perhaps as good a time as any to hand the reins over to someone else. In fact, after nearly 10 wonderful years at Red Hat, I’m making a bigger change and as of today am part of Confluent’s engineering team, where I expect to play a more active role in the broader Kafka community and more directly with Kafka Connect and Kafka Streams. I definitely plan to stay involved in the Debezium community, but will no longer be leading the project. That role will instead be filled by Gunnar Morling, who’s recently joined the Debezium community but has extensive experience in open source, the Hibernate community, and the Bean Validation specification effort. Gunnar is a great guy and an excellent developer, and will be an excellent lead for the Debezium community.

diff --git a/tag/newsletter/index.html b/tag/newsletter/index.html

Tag: newsletter

    Debezium Blog

    Welcome to the newest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    It’s been a long time since our last edition. But we are back again! In case you missed our last edition, you can check it out here.

    Welcome to the latest edition of the Debezium community newsletter, in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    In case you missed our last edition, you can check it out here.

    Welcome to the Debezium community newsletter in which we share all things CDC related including blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

    Welcome to the first edition of the Debezium community newsletter in which we share blog posts, group discussions, as well as StackOverflow questions that are relevant to our user community.

diff --git a/tag/notifications/index.html b/tag/notifications/index.html

Tag: notifications

    Debezium Blog

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.
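
As a very rough sketch of what sending a signal over JMX can look like from a Java client: the JMX plumbing below uses the standard javax.management API, but the service URL, MBean object name, and operation signature are assumptions made for illustration rather than values verified against a specific Debezium version, so check the reference documentation for the exact names.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Hedged sketch: connect to a connector's JMX endpoint and invoke its signal
// operation. The service URL, MBean name, and operation signature below are
// assumptions for illustration; consult the Debezium docs for the exact values.
public class JmxSignalSketch {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url =
                new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9012/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection mbeans = connector.getMBeanServerConnection();
            // Assumed object name pattern for a connector's signaling MBean.
            ObjectName signalBean = new ObjectName(
                    "debezium.postgres:type=management,context=signals,server=dbserver1");
            // Assumed operation: signal(id, type, data) triggering an ad-hoc snapshot.
            mbeans.invoke(signalBean,
                    "signal",
                    new Object[] {
                        "ad-hoc-1",
                        "execute-snapshot",
                        "{\"data-collections\": [\"public.customers\"]}"
                    },
                    new String[] {
                        String.class.getName(), String.class.getName(), String.class.getName()
                    });
        } finally {
            connector.close();
        }
    }
}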

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

diff --git a/tag/online-learning/index.html b/tag/online-learning/index.html

Tag: online-learning

    Debezium Blog

From the very beginning of the project, Debezium has provided a way to run its connectors directly inside an application. How this is provided has changed over time and is still evolving. This article describes another evolution step in this regard - a new implementation of the Debezium engine.

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored into the database. In this blog post, we will move one step further: we will use Debezium to create multiple data streams from the database, using one of the streams for continuous learning to improve our model and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool, as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

diff --git a/tag/operator/index.html b/tag/operator/index.html

Tag: operator

    Debezium Blog

Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    With Debezium 2.3, we introduced a preview of a brand new Debezium Operator with the aim to provide seamless deployment of Debezium Server to Kubernetes (k8s) clusters. The Debezium 2.4.0.Final release brings the next step towards the full support of this component. With this release, we are happy to announce that Debezium Operator is now available in the OperatorHub catalog for Kubernetes as well as in the community operator catalog embedded in the OpenShift and OKD distributions. The operator remains in the incubation phase; however, the full support of this component is approaching fast.

diff --git a/tag/oracle/index.html b/tag/oracle/index.html

Tag: oracle

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to gather changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we back-ported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While typically beta releases focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth, starting with a breaking change in MongoDB: the MongoDB connector explicitly...

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have noticed recently that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

    We hope that this exercise will show you just how simple it is to deploy Debezium for Oracle. The installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step, explaining why it is essential should you use a container image deployment.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability, and it is the recommended update for all users of earlier versions. It contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all of these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability, and it is the recommended update for all users of earlier versions. It contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability, and it is the recommended update for all users of earlier versions. It contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options, as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 brings quite a number of improvements, most notably the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
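
    As a small, concrete taste of the feature before the deep dive: once a signaling table has been configured for a connector, an ad-hoc incremental snapshot can be requested simply by inserting a signal row into that table. The sketch below is a minimal, hedged example; the table name debezium_signal, its id/type/data columns, the JDBC URL, the credentials, and the inventory.customers collection name are all assumptions standing in for whatever your own deployment uses.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.PreparedStatement;
        import java.util.UUID;

        // Minimal sketch: ask a running connector to perform an ad-hoc incremental snapshot
        // by inserting an "execute-snapshot" signal into its (assumed) signaling table.
        public class TriggerIncrementalSnapshot {
            public static void main(String[] args) throws Exception {
                String url = "jdbc:mysql://localhost:3306/inventory"; // placeholder source database
                try (Connection conn = DriverManager.getConnection(url, "debezium", "dbz");
                     PreparedStatement stmt = conn.prepareStatement(
                             "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                    stmt.setString(1, UUID.randomUUID().toString());   // any unique signal id
                    stmt.setString(2, "execute-snapshot");             // signal type for incremental snapshots
                    stmt.setString(3, "{\"data-collections\": [\"inventory.customers\"]}"); // tables to (re-)snapshot
                    stmt.executeUpdate();
                }
            }
        }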

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also, in the wider Debezium community, some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces heartbeat support to the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features below.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings a brand-new feature called incremental snapshots for the MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, along with transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features below.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
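
    To make the field renaming just mentioned a bit more tangible, here is a hedged sketch of what such a configuration fragment could look like on a connector; the transform alias unwrap, the chosen fields, and the field:alias rename pairs are illustrative assumptions rather than a definitive recipe.

        import java.util.Properties;

        // Minimal sketch of a connector configuration fragment applying the
        // ExtractNewRecordState SMT; "field:alias" pairs rename the added entries.
        public class UnwrapSmtConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("transforms", "unwrap");
                config.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
                // Add (and rename) metadata fields on the flattened record value.
                config.setProperty("transforms.unwrap.add.fields", "op:operation,source.ts_ms:event_timestamp");
                // Add (and rename) metadata as Kafka record headers.
                config.setProperty("transforms.unwrap.add.headers", "db:database_name");
                config.forEach((k, v) -> System.out.println(k + "=" + v));
            }
        }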

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering

    • Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode, "consistent hashing", allowing you to anonymize column values while still keeping them correlatable (see the configuration sketch after this list)

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector
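
    As promised above, here is a rough sketch of how the "consistent hashing" masking mode could be declared for a connector. The property name follows the column.mask.hash.<hashAlgorithm>.with.salt.<salt> pattern described in the Debezium documentation, but the algorithm, salt, and column names below are placeholders to swap for your own.

        import java.util.Properties;

        // Minimal sketch: hash (rather than clear) the listed columns so that equal
        // inputs still map to equal outputs and remain correlatable downstream.
        public class ConsistentHashingMaskConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                // column.mask.hash.<hashAlgorithm>.with.salt.<salt> = fully-qualified column names
                config.setProperty("column.mask.hash.SHA-256.with.salt.CzQMA0cB5K",
                        "inventory.customers.email,inventory.customers.phone");
                config.forEach((k, v) -> System.out.println(k + "=" + v));
            }
        }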

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern and the SMT for extracting the after state of change events have been re-worked and now offer more configuration flexibility.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors and, in turn, reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who use PostgreSQL 10 or higher as a service offered by different cloud providers have definitely felt the complications when you needed to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol, which is available out of the box since PostgreSQL 10.
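
    In practical terms this means that selecting pgoutput is a single connector property and no plug-in has to be installed on the database host. The sketch below is a minimal, hedged example; the hostname, credentials, and database name are placeholders for your own environment.

        import java.util.Properties;

        // Minimal sketch of a Postgres connector configuration using the built-in
        // pgoutput logical decoding plug-in (PostgreSQL 10+), so nothing extra has to
        // be deployed on the database server.
        public class PgOutputConnectorConfig {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                config.setProperty("plugin.name", "pgoutput");                    // use the built-in plug-in
                config.setProperty("database.hostname", "postgres.example.com");  // placeholder connection details
                config.setProperty("database.port", "5432");
                config.setProperty("database.user", "debezium");
                config.setProperty("database.password", "secret");
                config.setProperty("database.dbname", "inventory");
                config.forEach((k, v) -> System.out.println(k + "=" + v));
            }
        }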

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if no table at all is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295), as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)
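
    As a hedged illustration of the last item: decimal.handling.mode controls how DECIMAL/NUMERIC values are emitted, and any database.* property is forwarded to the SQL Server JDBC driver. The concrete values below, including the applicationName driver property, are assumptions chosen purely for the sake of the example.

        import java.util.Properties;

        // Minimal sketch: choose a decimal handling mode and pass a driver option
        // through to the SQL Server JDBC driver via the database.* prefix.
        public class SqlServerConnectorOptions {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.sqlserver.SqlServerConnector");
                config.setProperty("decimal.handling.mode", "string");       // emit DECIMAL/NUMERIC values as strings
                config.setProperty("database.applicationName", "debezium");  // forwarded to the JDBC driver
                config.forEach((k, v) -> System.out.println(k + "=" + v));
            }
        }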

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. Update: the Docker images are already uploaded and ready for use under the tags 0.9.0.Alpha1 and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
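
    For anyone wanting to give the new parser a spin, opting in is a single connector property, as sketched below; everything apart from ddl.parser.mode itself is a placeholder, and the snippet is a rough sketch rather than a complete connector configuration.

        import java.util.Properties;

        // Minimal sketch: opt a MySQL connector into the new Antlr-based DDL parser
        // (the legacy parser remains the default in Debezium 0.8).
        public class AntlrDdlParserOptIn {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
                config.setProperty("ddl.parser.mode", "antlr");  // switch from the legacy DDL parser
                config.forEach((k, v) -> System.out.println(k + "=" + v));
            }
        }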

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    Tag: oracle

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, as well as a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stopping to gather changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung into action with spring upon us and the summer spirit ahead, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, a configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several noteworthy new features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes (MongoDB): The MongoDB connector explicitly...

    This post is the final part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first installment of this series is found here and the second installment is found here.

    In this third and final installment, we are going to build on what we have done in the previous two posts, focusing on the following areas:

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cycle took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what it means moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features, including a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server, to name just a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed recently that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. In case you missed it, the first part of this series is here.

    In this second installment, we will build on what we did in part one by deploying the Oracle connector using Zookeeper, Kafka, and Kafka Connect. We are going to discuss a variety of configuration options for the connector and why they’re essential. And finally, we’re going to see the connector in action!

    This post is part of a 3-part series to explore using Debezium to ingest changes from an Oracle database using Oracle LogMiner. Throughout the series, we’ll examine all the steps to setting up a proof of concept (POC) deployment for Debezium for Oracle. We will discuss setup and configurations as well as the nuances of multi-tenancy. We will also dive into any known pitfalls and concerns you may need to know and how to debug specific problems. And finally, we’ll talk about performance and monitoring to maintain a healthy connector deployment.

    Throughout this exercise, we hope to show you just how simple it is to deploy Debezium for Oracle. This installation and setup portion of the series may seem quite complicated, but many of these steps likely already exist in a pre-existing environment. We will dive into each step and explain why it is essential, should you use a container image deployment.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all of these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
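
    To make that mechanism a little more concrete before the deep dive: an ad-hoc incremental snapshot is requested by writing a signal row into the signaling table that the connector watches. The following is a minimal, hedged sketch in plain JDBC; the table name (debezium_signal), the connection details, and the captured table are assumptions and must match your connector’s signal.data.collection configuration, and a MySQL JDBC driver is assumed to be on the classpath.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.PreparedStatement;

        public class TriggerIncrementalSnapshot {
            public static void main(String[] args) throws Exception {
                // Assumed connection details; the signaling table must already exist and be
                // referenced by the connector's signal.data.collection option.
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                     PreparedStatement insert = conn.prepareStatement(
                        "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                    insert.setString(1, "ad-hoc-1");             // arbitrary unique signal id
                    insert.setString(2, "execute-snapshot");     // signal type for ad-hoc incremental snapshots
                    insert.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
                    insert.executeUpdate();                      // the connector picks up the signal and snapshots the table in chunks
                }
            }
        }

    The connector then re-reads the requested table chunk by chunk while continuing to stream ongoing changes, which is exactly the watermark-based interleaving the post walks through.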

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community, some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the others.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and includes a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component (the web-based Debezium UI), transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering

    • Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
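
    As a rough illustration of what such a Testcontainers-based test can look like, here is a hedged sketch. The DebeziumContainer and ConnectorConfiguration types and their methods are recalled from the debezium-testing-testcontainers module and should be treated as assumptions rather than a verified API, as should the container image tags.

        import org.testcontainers.containers.KafkaContainer;
        import org.testcontainers.containers.Network;
        import org.testcontainers.containers.PostgreSQLContainer;

        import io.debezium.testing.testcontainers.ConnectorConfiguration;
        import io.debezium.testing.testcontainers.DebeziumContainer;

        public class CdcIntegrationSketch {
            public static void main(String[] args) {
                Network network = Network.newNetwork();

                // Kafka, Postgres and Kafka Connect (with Debezium) as throw-away containers
                KafkaContainer kafka = new KafkaContainer().withNetwork(network);
                PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11")
                        .withNetwork(network)
                        .withNetworkAliases("postgres");
                DebeziumContainer connect = new DebeziumContainer("debezium/connect:1.1.0.Beta2")
                        .withNetwork(network)
                        .withKafka(kafka);

                kafka.start();
                postgres.start();
                connect.start();

                // Register a Postgres connector against the database container; a real test
                // would then apply data changes and assert on the records arriving in Kafka.
                connect.registerConnector("my-connector",
                        ConnectorConfiguration.forJdbcContainer(postgres)
                                .with("database.server.name", "dbserver1"));

                connect.stop();
                postgres.stop();
                kafka.stop();
            }
        }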

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol, which is available out of the box since PostgreSQL 10.
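
    For illustration, switching to the built-in plug-in is purely a matter of connector configuration; below is a minimal sketch of the relevant options expressed as Java properties, where the host, credentials, and database names are placeholders.

        import java.util.Properties;

        public class PgOutputConfigSketch {
            public static void main(String[] args) {
                Properties config = new Properties();
                config.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                config.setProperty("plugin.name", "pgoutput");        // use the plug-in built into PostgreSQL 10+
                config.setProperty("database.hostname", "postgres");  // placeholder connection details
                config.setProperty("database.port", "5432");
                config.setProperty("database.user", "debezium");
                config.setProperty("database.password", "dbz");
                config.setProperty("database.dbname", "inventory");
                config.setProperty("database.server.name", "dbserver1");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }

    The same keys apply whether the connector is registered with Kafka Connect or run via the embedded engine; the point is that no server-side plug-in installation is required once pgoutput is selected.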

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if no tables are actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.
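
    For example, a downstream consumer can watch the snapshot field of the source block to detect the end of the snapshot phase. A hedged sketch follows; the field layout assumes the default JSON converter, and the helper itself is purely illustrative.

        import com.fasterxml.jackson.databind.JsonNode;
        import com.fasterxml.jackson.databind.ObjectMapper;

        public class SnapshotPhaseCheck {
            private static final ObjectMapper MAPPER = new ObjectMapper();

            // Returns true if the given change event value marks the last record of the snapshot phase.
            static boolean isLastSnapshotRecord(String eventValueJson) throws Exception {
                JsonNode source = MAPPER.readTree(eventValueJson).path("payload").path("source");
                return "last".equals(source.path("snapshot").asText());
            }

            public static void main(String[] args) throws Exception {
                String sample = "{\"payload\":{\"source\":{\"snapshot\":\"last\"}}}";
                System.out.println(isLastSnapshotRecord(sample));   // prints true
            }
        }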

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase through-put and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently, there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. The Docker images have since been uploaded and are ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
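
    For those who want to try it, opting in is a single connector option; here is a minimal, hedged sketch that only shows the relevant setting (all other MySQL connector options stay as they are in your existing configuration).

        import java.util.Properties;

        public class AntlrDdlParserOptIn {
            public static void main(String[] args) {
                Properties config = new Properties();
                // Opt in to the new Antlr-based DDL parser; "legacy" remains the default in 0.8.
                config.setProperty("ddl.parser.mode", "antlr");
                config.forEach((key, value) -> System.out.println(key + "=" + value));
            }
        }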

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    \ No newline at end of file diff --git a/tag/outbox/index.html b/tag/outbox/index.html index 5a7b406735..80a539c555 100644 --- a/tag/outbox/index.html +++ b/tag/outbox/index.html @@ -1 +1 @@ - Tag: outbox

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work: Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of changes, from new improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung into action, with spring upon us and the summer spirit ahead, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand-new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we continuously look at improvements and fixes to ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment to dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, a configurable order for the MongoDB aggregation pipeline, and lastly, support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community-led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed in production by many organizations across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focuses heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 brings quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Also in the wider Debezium community, some exciting things happened over the last few months. For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

This release adds skipped-operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments to explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.
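
    For readers who have not used the signaling feature yet: a signal is sent by inserting a row into a table that the connector is configured to watch via its signal.data.collection property. Below is a minimal, hedged sketch using plain JDBC, here triggering an incremental snapshot (one of the signal types introduced alongside the Signal API); the connection details, the debezium_signal table name, and the target table are illustrative placeholders rather than anything prescribed by this post.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.UUID;

// Sends an "execute-snapshot" signal to a running connector by inserting a row
// into the signaling table it watches (configured via signal.data.collection).
// The connection details, the "debezium_signal" table, and the "public.orders"
// target table are placeholders for illustration only.
public class SendSnapshotSignal {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                     "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
             PreparedStatement insert = conn.prepareStatement(
                     "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {

            insert.setString(1, UUID.randomUUID().toString());          // arbitrary unique id
            insert.setString(2, "execute-snapshot");                     // signal type
            insert.setString(3, "{\"data-collections\": [\"public.orders\"]}");
            insert.executeUpdate();                                      // picked up by the connector
        }
    }
}
```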

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

• Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern, as well as the SMT for extracting the after state of change events, have been reworked and now offer more configuration flexibility.
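
    To give a flavor of what such an integration test setup can look like, here is a minimal, hedged sketch that uses only the stock Testcontainers modules to start the source database and a Kafka broker; the Debezium Testcontainers integration additionally provides helpers for starting Kafka Connect and registering connectors, which are omitted here, and the image tags are placeholders.

```java
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;
import org.testcontainers.utility.DockerImageName;

// Infrastructure half of a CDC integration test: a Postgres source with logical
// decoding enabled and a Kafka broker on a shared network. A real test would
// additionally start Kafka Connect (or use Debezium's Testcontainers helpers),
// register a connector, write rows, and assert on the emitted change events.
public class CdcTestInfrastructureSketch {

    public static void main(String[] args) {
        Network network = Network.newNetwork();

        try (KafkaContainer kafka =
                     new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0"));
             PostgreSQLContainer<?> postgres =
                     new PostgreSQLContainer<>(DockerImageName.parse("postgres:15"))) {

            kafka.withNetwork(network);
            postgres.withNetwork(network)
                    .withNetworkAliases("postgres")
                    // The Debezium Postgres connector requires logical decoding.
                    .withCommand("postgres", "-c", "wal_level=logical");

            kafka.start();
            postgres.start();

            System.out.println("Kafka bootstrap servers: " + kafka.getBootstrapServers());
            System.out.println("Postgres JDBC URL:       " + postgres.getJdbcUrl());
        }
    }
}
```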

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to perform read-your-own-writes semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.
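
    To make that concrete, here is a minimal, hedged sketch of the write side of the pattern: the business row and the corresponding outbox event row are inserted in the same local transaction, so the event only ever becomes visible to change data capture if the business change commits. Table names, column names, and connection details are illustrative placeholders; the column layout simply mirrors the commonly used aggregatetype/aggregateid/type/payload shape.

```java
import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.UUID;

// Write side of the outbox pattern: persist the business change and the event
// describing it in one local transaction, so downstream consumers fed by CDC on
// the outbox table only ever see events for committed business changes.
public class PlaceOrder {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/orderdb", "app", "app")) {
            conn.setAutoCommit(false);

            String orderId = UUID.randomUUID().toString();

            // 1. The actual business write.
            try (PreparedStatement order = conn.prepareStatement(
                    "INSERT INTO purchase_order (id, customer_id, total) VALUES (?, ?, ?)")) {
                order.setString(1, orderId);
                order.setString(2, "customer-123");
                order.setBigDecimal(3, new BigDecimal("42.00"));
                order.executeUpdate();
            }

            // 2. The outbox event for that write, in the same transaction.
            try (PreparedStatement event = conn.prepareStatement(
                    "INSERT INTO outbox (id, aggregatetype, aggregateid, type, payload) "
                            + "VALUES (?, ?, ?, ?, ?)")) {
                event.setString(1, UUID.randomUUID().toString());
                event.setString(2, "Order");                 // used for topic routing
                event.setString(3, orderId);                 // becomes the event key
                event.setString(4, "OrderCreated");
                event.setString(5, "{\"orderId\":\"" + orderId + "\",\"total\":42.00}");
                event.executeUpdate();
            }

            conn.commit(); // both rows become visible atomically, or neither does
        }
    }
}
```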

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
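
    For reference, the transformation is applied like any other Kafka Connect single message transform in the connector configuration. The sketch below expresses such a configuration as Java Properties; the connection values and topic naming are placeholders, and depending on the Debezium version additional required options apply (for example topic.prefix, formerly database.server.name), so the linked documentation remains the authoritative reference.

```java
import java.util.Properties;

// Sketch of a Debezium Postgres connector configuration that captures only the
// outbox table and applies the outbox EventRouter SMT, so each outbox row is
// re-routed to a topic derived from its aggregate type.
public class OutboxRouterConfig {

    public static Properties build() {
        Properties config = new Properties();
        config.put("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        config.put("database.hostname", "localhost");        // placeholder connection values
        config.put("database.port", "5432");
        config.put("database.user", "app");
        config.put("database.password", "app");
        config.put("database.dbname", "orderdb");
        config.put("topic.prefix", "orders");                 // database.server.name on older versions
        config.put("table.include.list", "public.outbox");    // capture only the outbox table

        // The outbox event router shipped with Debezium.
        config.put("transforms", "outbox");
        config.put("transforms.outbox.type", "io.debezium.transforms.outbox.EventRouter");
        // Rows with aggregatetype = "Order" end up on the "outbox.event.Order" topic.
        config.put("transforms.outbox.route.topic.replacement", "outbox.event.${routedByValue}");

        return config;
    }
}
```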

Tag: outbox

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that I announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues with the 2.6.1.Final release; it adds support for Oracle database query filtering with more than one thousand tables, fixes a race condition with PostgreSQL offset flushing, fixes Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…

    As the temperature for summer continues to rise, I’m please to announce that Debezium has some really cool news, Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Lets a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note to any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes a brand new Db2 connector for iSeries. There is a lot to cover in this release, so lets dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that are being made that ensures that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features, understand how to use these to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note to any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note to any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we back ported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so lets take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note to any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last minute addititons that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announcement the next installation of that series, Debezium 2.4.0.Beta2.

    While typically beta releases focus on stability and bugs, this release includes quite a number of noteworthy improves and new features including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions, stability, documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of thees new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions, stability, documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvement to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes New features Other changes Breaking changes MongoDB The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Lets take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so lets take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters was added, and the usage of Docker Hub for images was deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community-led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production by many organizations across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release, due out next week, this release focused heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no fewer than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements but most notably is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

Also in the wider Debezium community some exciting things happened over the last few months. For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. This release is also the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

This release adds skipped-operations optimizations for SQL Server, introduces heartbeat support to the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

This release brings a brand-new feature, incremental snapshots, for the MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.
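
    As a rough illustration of how an ad-hoc incremental snapshot is typically triggered, here is a minimal sketch that inserts a signal row into the signaling table the connector has been configured to watch. The table name debezium_signal, the connection details, and the public.orders collection are illustrative placeholders, not values taken from this post.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Illustrative sketch: trigger an ad-hoc incremental snapshot by inserting a row
// into the signaling table that the connector watches (signal.data.collection).
// All names and connection details below are placeholders.
public class TriggerIncrementalSnapshot {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
             PreparedStatement stmt = conn.prepareStatement(
                "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            stmt.setString(1, "adhoc-snapshot-1");                            // arbitrary unique id
            stmt.setString(2, "execute-snapshot");                             // signal type
            stmt.setString(3, "{\"data-collections\": [\"public.orders\"]}");  // tables to re-snapshot
            stmt.executeUpdate();
        }
    }
}
```

    The idea is that the connector picks up this insert, reads the named tables in chunks, and interleaves those snapshot reads with the ongoing stream of changes rather than pausing streaming for a full re-snapshot.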

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

• Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs were fixed related to the Debezium connector for Oracle LogMiner adapter thanks to the continued feedback by the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    Welcome to the first edition of "Debezium Community Stories With…​", a new series of interviews with members of the Debezium and change data capture community, such as users, contributors or integrators. We’re planning to publish more parts of this series in a loose rhythm, so if you’d like to be part of it, please let us know. In today’s edition it’s my pleasure to talk to Renato Mefi, a long-time Debezium user and contributor.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, no fewer than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, no fewer than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
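
    For context, a test along the lines enabled by this integration might look roughly like the following sketch. It assumes the debezium-testing-testcontainers artifact plus the Kafka and PostgreSQL Testcontainers modules on the test classpath; the image tags and configuration values are illustrative, and depending on your Testcontainers version you may need to mark the database image as a compatible substitute for the stock postgres image.

```java
import io.debezium.testing.testcontainers.ConnectorConfiguration;
import io.debezium.testing.testcontainers.DebeziumContainer;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;

// Sketch of a CDC integration test environment: Kafka, Postgres, and Kafka Connect
// (with the Debezium connectors) wired together on one container network.
public class CdcIntegrationSketch {
    public static void main(String[] args) {
        Network network = Network.newNetwork();

        KafkaContainer kafka = new KafkaContainer()
                .withNetwork(network);
        PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11")
                .withNetwork(network)
                .withNetworkAliases("postgres");
        DebeziumContainer connect = new DebeziumContainer("debezium/connect:1.1.0.Beta2")
                .withNetwork(network)
                .withKafka(kafka)
                .dependsOn(kafka);

        kafka.start();
        postgres.start();
        connect.start();

        // Register a Postgres connector pointed at the database container
        ConnectorConfiguration config = ConnectorConfiguration.forJdbcContainer(postgres)
                .with("database.server.name", "dbserver1");
        connect.registerConnector("inventory-connector", config);

        // ... consume from Kafka and assert on the emitted change events, then tear down
        connect.stop();
        postgres.stop();
        kafka.stop();
    }
}
```

    In a real test this would live in a JUnit class with the containers started once per test class and a Kafka consumer asserting on the captured change events.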

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted, resulting in disastrous consequences for mission-critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

The term outbox describes a pattern that allows independent components or services to achieve read-your-own-writes semantics while concurrently providing a reliable, eventually consistent view of those writes across component or service boundaries.
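
    To make that concrete, here is a minimal, purely illustrative sketch of the write side of the pattern, assuming a relational database accessed over JDBC. The purchase_order and outbox tables, their columns, and the Postgres-style jsonb cast are invented for the example; the point is that the business change and the outgoing event are written in the same local transaction, so either both become visible or neither does.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.UUID;

// Illustrative only: write the business change and the outgoing event atomically,
// in one local transaction. A CDC pipeline then picks up the committed outbox rows.
public class OrderService {

    public void placeOrder(Connection conn, long orderId, String payloadJson) throws Exception {
        boolean oldAutoCommit = conn.getAutoCommit();
        conn.setAutoCommit(false);
        try {
            try (PreparedStatement order = conn.prepareStatement(
                    "INSERT INTO purchase_order (id, status) VALUES (?, 'PLACED')")) {
                order.setLong(1, orderId);
                order.executeUpdate();
            }
            try (PreparedStatement outbox = conn.prepareStatement(
                    "INSERT INTO outbox (id, aggregatetype, aggregateid, type, payload) "
                            + "VALUES (?, 'order', ?, 'OrderPlaced', ?::jsonb)")) {
                outbox.setObject(1, UUID.randomUUID());
                outbox.setString(2, String.valueOf(orderId));
                outbox.setString(3, payloadJson);
                outbox.executeUpdate();
            }
            conn.commit();     // both writes become visible together
        } catch (Exception e) {
            conn.rollback();   // neither write is applied on failure
            throw e;
        } finally {
            conn.setAutoCommit(oldAutoCommit);
        }
    }
}
```

    A change data capture pipeline such as Debezium then reads the committed outbox inserts from the transaction log and publishes them, which is where the reliable, eventually consistent view for other services comes from.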

You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
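
    For orientation, registering the transformation on a connector boils down to a couple of configuration properties. The sketch below shows them assembled as Java properties; the transform alias outbox is arbitrary, and only the io.debezium.transforms.outbox.EventRouter class name is taken from the Debezium documentation, with all other connector properties omitted.

```java
import java.util.Properties;

// Minimal sketch: attach the outbox event router SMT to an existing Debezium
// connector configuration. The alias "outbox" is arbitrary; database coordinates,
// filters, and other connector settings are intentionally left out.
public class OutboxRouterConfig {

    public static Properties withOutboxRouter(Properties connectorProps) {
        connectorProps.setProperty("transforms", "outbox");
        connectorProps.setProperty("transforms.outbox.type",
                "io.debezium.transforms.outbox.EventRouter");
        return connectorProps;
    }
}
```

    By default the transform routes each change event captured from the outbox table to a topic derived from the event’s aggregate type, so consumers subscribe to per-aggregate topics rather than to the raw table topic.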

diff --git a/tag/performance/index.html b/tag/performance/index.html

    Tag: performance

    In the realm of data streaming optimization, even subtle improvements can make a significant impact. This article focuses on one such refinement: the introduction of batch support in Debezium’s JDBC connector. We’ll guide you through the process of enabling batches and share the practical outcomes of our performance testing.
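
    As a hedged illustration only: enabling batching on the JDBC sink comes down to connector configuration. In the sketch below the batch.size property name is assumed from the JDBC sink connector documentation rather than stated in this excerpt, and the connection values are placeholders.

```java
import java.util.Properties;

// Illustrative JDBC sink configuration with batched writes enabled.
// Property names follow the Debezium JDBC sink connector documentation as recalled;
// connection values are placeholders for a real deployment.
public class JdbcSinkBatchConfig {

    public static Properties batchedSink() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.jdbc.JdbcSinkConnector");
        props.setProperty("connection.url", "jdbc:postgresql://localhost:5432/target");
        props.setProperty("connection.username", "postgres");
        props.setProperty("connection.password", "postgres");
        props.setProperty("insert.mode", "upsert");
        props.setProperty("primary.key.mode", "record_key");
        props.setProperty("batch.size", "1000"); // flush up to 1000 records per batch
        return props;
    }
}
```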

diff --git a/tag/postgres/index.html b/tag/postgres/index.html

    Tag: postgres

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes everything from new changes and improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, an issue where MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team springs forward into action with spring upon us and the summer spirit ahead, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable.

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).
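
    A rough sketch of what such a setup can look like follows. The connector class and common PostgreSQL properties are standard, while the transform class name io.debezium.connector.postgresql.transforms.timescaledb.TimescaleDb and the _timescaledb_internal schema filter are assumptions drawn from the Debezium documentation and should be checked against the version you deploy.

```java
import java.util.Properties;

// Illustrative sketch of pairing the standard PostgreSQL connector with the
// TimescaleDB single message transform described in the article. Connection
// values are placeholders; the transform class name and schema filter are
// assumptions to be verified against the documentation.
public class TimescaleDbSourceConfig {

    public static Properties timescaleSource() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("database.hostname", "localhost");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "postgres");
        props.setProperty("database.password", "postgres");
        props.setProperty("database.dbname", "metrics");
        props.setProperty("topic.prefix", "timescaledb");
        // TimescaleDB stores hypertable data in internal chunk tables; capture that schema
        props.setProperty("schema.include.list", "_timescaledb_internal");
        // Apply the TimescaleDB SMT on top of the standard PostgreSQL connector
        props.setProperty("transforms", "timescaledb");
        props.setProperty("transforms.timescaledb.type",
                "io.debezium.connector.postgresql.transforms.timescaledb.TimescaleDb");
        return props;
    }
}
```

    Roughly speaking, the transform’s job is to map change events captured from TimescaleDB’s internal chunk tables back to the logical hypertables they belong to, which is what the rest of the article walks through.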

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements: batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bug fixes, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions and stability, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release took a bit longer than our traditional three-month cadence. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also major upgrades to several core components.

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So lets take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Lets take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, the focus has been heavily on bugfixes. Yet this release also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

    Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months. For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this one mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing users to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed with tons of new features, including support for incremental snapshotting, which can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community.
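
    For readers who want to see what this looks like in practice, here is a minimal, hypothetical sketch of triggering an ad-hoc incremental snapshot by inserting a row into a signaling table. The table name inventory.debezium_signal, the connection details, and the captured table are illustrative placeholders; the connector is assumed to be configured with signal.data.collection pointing at that table, and the Postgres JDBC driver is assumed to be on the classpath.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.PreparedStatement;

        // Sketch: ask a running connector to incrementally snapshot one table by
        // inserting an execute-snapshot signal into the configured signaling table.
        public class SendSnapshotSignal {
            public static void main(String[] args) throws Exception {
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                     PreparedStatement stmt = conn.prepareStatement(
                        "INSERT INTO inventory.debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                    stmt.setString(1, "ad-hoc-snapshot-1");                              // arbitrary unique id
                    stmt.setString(2, "execute-snapshot");                               // signal type
                    stmt.setString(3, "{\"data-collections\": [\"inventory.orders\"]}"); // tables to snapshot
                    stmt.executeUpdate();
                }
            }
        }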

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped-operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments to explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors.

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using the ExtractNewRecordState SMT’s add.fields and add.headers configurations (see the sketch after this list)
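
    As a rough illustration of the add.fields and add.headers options mentioned in the last bullet, the snippet below builds the corresponding SMT configuration as Java properties, as you might pass them to the embedded engine or assemble them before posting to Kafka Connect. The chosen fields and header names are arbitrary examples; the option keys follow the Debezium documentation.

        import java.util.Properties;

        // Illustrative configuration of the ExtractNewRecordState SMT with the new
        // add.fields / add.headers options; the selected fields are only examples.
        public class UnwrapSmtConfig {
            public static Properties smtProperties() {
                Properties props = new Properties();
                props.setProperty("transforms", "unwrap");
                props.setProperty("transforms.unwrap.type",
                        "io.debezium.transforms.ExtractNewRecordState");
                // copy change event metadata into the flattened record value
                props.setProperty("transforms.unwrap.add.fields", "op,source.ts_ms");
                // and/or expose metadata as Kafka message headers
                props.setProperty("transforms.unwrap.add.headers", "db,table");
                return props;
            }
        }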

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to continued feedback by the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode, which should preferably be used; for Debezium 1.3 we’re planning to make it the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering (see the sketch after this list)

    • Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing you to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector
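
    To give a feel for the content-based filtering SMT from the first bullet above, here is a minimal, hypothetical configuration sketch using the Groovy binding. The class and option names follow the Debezium 1.2 documentation for the scripting-based Filter transform; the condition itself is just an example, and debezium-scripting plus a JSR-223 Groovy implementation are assumed to be on the Kafka Connect classpath.

        import java.util.Properties;

        // Hypothetical example: drop all change events whose new row state has a
        // status other than 'ACTIVE', using the scripting-based Filter SMT.
        public class FilterSmtConfig {
            public static Properties smtProperties() {
                Properties props = new Properties();
                props.setProperty("transforms", "filter");
                props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
                props.setProperty("transforms.filter.language", "jsr223.groovy");
                // keep only events whose 'after' state has status == 'ACTIVE'
                props.setProperty("transforms.filter.condition", "value.after.status == 'ACTIVE'");
                return props;
            }
        }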

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, which opens up its open-source change data capture capabilities to messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API (a usage sketch follows below)

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
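
    The embedded engine mentioned in the first bullet can be used roughly as follows. This is a minimal sketch, assuming the debezium-api, debezium-embedded and MySQL connector artifacts on the classpath; the connection details are placeholders, and some option names (e.g. database.history) are specific to the Debezium versions of that era. SMTs would be configured via additional transforms.* entries in the same Properties object.

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        import java.util.Properties;
        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        // Minimal sketch of the Debezium embedded engine: run a MySQL connector
        // in-process and hand each change event (as JSON) to a callback.
        public class EmbeddedEngineExample {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.setProperty("name", "embedded-mysql");
                props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
                props.setProperty("database.hostname", "localhost");   // placeholder connection details
                props.setProperty("database.port", "3306");
                props.setProperty("database.user", "debezium");
                props.setProperty("database.password", "dbz");
                props.setProperty("database.server.id", "5400");
                props.setProperty("database.server.name", "dbserver1");
                props.setProperty("database.history", "io.debezium.relational.history.FileDatabaseHistory");
                props.setProperty("database.history.file.filename", "/tmp/dbhistory.dat");

                DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                        .using(props)
                        .notifying(event -> System.out.println(event.value()))  // handle each change event
                        .build();

                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);   // runs until engine.close() is called
            }
        }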

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features.

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

    There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc. (a minimal automated check for this is sketched after this list)

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.
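
    As an example of the first kind of check, the following test verifies that a MySQL source database is in row-based binlog mode. This is a sketch only: connection details are placeholders, and JUnit 5 plus the MySQL JDBC driver are assumed to be available.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.ResultSet;
        import java.sql.Statement;

        import org.junit.jupiter.api.Test;
        import static org.junit.jupiter.api.Assertions.assertEquals;

        // Sketch of a configuration test: verify the MySQL source database uses
        // row-based binlogging, which the Debezium MySQL connector requires.
        class SourceDatabaseConfigurationTest {

            @Test
            void binlogFormatIsRow() throws Exception {
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                     Statement stmt = conn.createStatement();
                     ResultSet rs = stmt.executeQuery("SHOW VARIABLES LIKE 'binlog_format'")) {
                    rs.next();
                    assertEquals("ROW", rs.getString("Value"));
                }
            }
        }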

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power a lot of different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data into a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that future users can avoid them, and discuss one of the more challenging production incidents we faced and how Debezium helped ensure we could recover without any data loss. In closing, we discuss what value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
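
    A rough sketch of such a Testcontainers-based test is shown below. Treat it as an outline only: the DebeziumContainer and ConnectorConfiguration helpers come from the debezium-testing-testcontainers module, but their exact constructor arguments and method names are recalled from the announcement-era documentation and may differ; image tags and connector options are likewise illustrative.

        import io.debezium.testing.testcontainers.ConnectorConfiguration;
        import io.debezium.testing.testcontainers.DebeziumContainer;
        import org.junit.jupiter.api.Test;
        import org.testcontainers.containers.KafkaContainer;
        import org.testcontainers.containers.Network;
        import org.testcontainers.containers.PostgreSQLContainer;
        import org.testcontainers.lifecycle.Startables;
        import org.testcontainers.utility.DockerImageName;

        import java.util.stream.Stream;

        // Outline of a CDC integration test: spin up Kafka, Postgres and Kafka Connect
        // with the Debezium connectors, register a connector, then assert on the
        // change events arriving in Kafka.
        class CdcPipelineTest {

            private static final Network network = Network.newNetwork();

            private static final KafkaContainer kafka =
                    new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:5.4.3"))
                            .withNetwork(network);

            private static final PostgreSQLContainer<?> postgres =
                    new PostgreSQLContainer<>(DockerImageName.parse("postgres:11"))
                            .withNetwork(network)
                            .withNetworkAliases("postgres");

            private static final DebeziumContainer connect =
                    new DebeziumContainer("debezium/connect:1.1.0.Beta2")
                            .withNetwork(network)
                            .withKafka(kafka)
                            .dependsOn(kafka);

            @Test
            void shouldStreamChangeEvents() {
                Startables.deepStart(Stream.of(kafka, postgres, connect)).join();

                // register a Postgres connector pointing at the test database
                connect.registerConnector("inventory-connector",
                        ConnectorConfiguration.forJdbcContainer(postgres)
                                .with("database.server.name", "dbserver1")
                                .with("plugin.name", "pgoutput"));

                // ... insert rows via JDBC, consume from Kafka and assert on the change events
            }
        }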

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database as well as a wide range of bug fixes. As the 1.1 release still is under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1.

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

    Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.
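
    One option discussed in this context is to change the table’s replica identity to FULL, so that the complete old row, including TOAST-ed columns, is written to the WAL and shows up in the before part of change events. Below is a minimal sketch via JDBC; the table name and connection details are placeholders, and the trade-off is a larger WAL volume for the affected table.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.Statement;

        // Sketch: make Postgres log the full old row image for one table, so that
        // unchanged TOAST-ed column values appear in the 'before' block of events.
        public class SetReplicaIdentityFull {
            public static void main(String[] args) throws Exception {
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                     Statement stmt = conn.createStatement()) {
                    stmt.execute("ALTER TABLE inventory.products REPLICA IDENTITY FULL");
                }
            }
        }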

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector to the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of having to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
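
    In practical terms this means there is nothing to install inside the database; a replication slot and a publication are all the connector needs, and the connector is simply configured with plugin.name=pgoutput. The sketch below creates the publication up front via JDBC, which can be useful when the connector’s database user is not allowed to create it automatically. The publication name dbz_publication follows the connector’s documented default (configurable via publication.name); connection details are placeholders.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.Statement;

        // Sketch: prepare a publication for the pgoutput-based Debezium Postgres
        // connector. The connector itself is then configured with plugin.name=pgoutput.
        public class CreatePublicationForPgoutput {
            public static void main(String[] args) throws Exception {
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                     Statement stmt = conn.createStatement()) {
                    // the connector would create this publication itself if it is missing
                    stmt.execute("CREATE PUBLICATION dbz_publication FOR ALL TABLES");
                }
            }
        }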

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

    This is true for all connectors except the Debezium PostgreSQL connector. This connector is special in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins:

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

    • wal2json, which is based on JSON and which is maintained by its own upstream community

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector.

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

    The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More of a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. The Docker images for 0.9.0.Alpha1 have since been uploaded to Docker Hub and are ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported against last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

    This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

    Now let’s take a closer look at some of the new features.

    We wish all the best to the Debezium community for 2018!

    While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server to leverage its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we will optimize access to the data via the SQL query language as well as via full-text search.

    Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to an improvement that reduces the risk of an internal race condition.

    Robert Coup has found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

    Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

    We have also gathered feedback from the first attempts to run against Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

    Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.
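
    To make the moving pieces a bit more concrete, here is a rough Java sketch (not taken from the post itself) of how the sink side of such a pipeline could be configured so that the event flattening SMT unwraps Debezium’s change event envelope before rows reach PostgreSQL. The connector name, topic, and connection settings are placeholders, and the SMT class name io.debezium.transforms.UnwrapFromEnvelope is the one shipped in that era of Debezium; in practice this map would be posted as JSON to the Kafka Connect REST API.

```java
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Sketch of a JDBC sink connector configuration that applies Debezium's
 * event flattening SMT. All values below are illustrative placeholders.
 */
public class SinkConnectorConfigSketch {

    public static void main(String[] args) {
        Map<String, String> config = new LinkedHashMap<>();
        config.put("connector.class", "io.confluent.connect.jdbc.JdbcSinkConnector");
        config.put("topics", "dbserver1.inventory.customers");
        config.put("connection.url",
                "jdbc:postgresql://postgres:5432/inventory?user=postgres&password=postgres");
        config.put("auto.create", "true");

        // Unwrap the Debezium envelope so only the flat "after" row state reaches the sink
        config.put("transforms", "unwrap");
        config.put("transforms.unwrap.type", "io.debezium.transforms.UnwrapFromEnvelope");

        // Print the settings that would be submitted to Kafka Connect
        config.forEach((key, value) -> System.out.println(key + "=" + value));
    }
}
```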

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    Like the previous release, 0.5.2 fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0, and upgrading is recommended for all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

    Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
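
    To give an idea of what such a custom SMT can look like, here is a minimal sketch of a transformation that re-routes every record into a single, configurable topic; the class name and the target.topic option are invented for this example, while the Transformation interface and ConnectRecord.newRecord() shown here are the standard Kafka Connect APIs.

```java
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

/**
 * Illustrative custom SMT that re-routes all records from the per-table
 * topics produced by a Debezium connector into one aggregate topic.
 */
public class RerouteToSingleTopic<R extends ConnectRecord<R>> implements Transformation<R> {

    // "target.topic" is a made-up option name used only in this sketch
    private static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define("target.topic", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH,
                    "Name of the topic all records are re-routed to");

    private String targetTopic;

    @Override
    public void configure(Map<String, ?> props) {
        targetTopic = String.valueOf(props.get("target.topic"));
    }

    @Override
    public R apply(R record) {
        // Keep key, value and schemas untouched; only change the destination topic
        return record.newRecord(targetTopic, record.kafkaPartition(),
                record.keySchema(), record.key(),
                record.valueSchema(), record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return CONFIG_DEF;
    }

    @Override
    public void close() {
    }
}
```

    Once the compiled class is on the Kafka Connect classpath, it would be enabled in the connector configuration via the usual transforms properties, e.g. transforms=reroute and transforms.reroute.type set to the fully-qualified class name.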

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.


    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, as well as a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us and the summer spirit ahead, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes (MongoDB): The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release took a bit longer than our traditional three-month cadence. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these changes and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: a Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

    Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
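
    As a small, concrete illustration of the mechanism that post walks through: with the signaling table configured for a connector, an ad-hoc incremental snapshot is requested by inserting a signal row whose type is execute-snapshot. The sketch below assumes a MySQL source, a signaling table named debezium_signal with the documented id/type/data columns, and a captured table inventory.customers; all of these names and the connection details are placeholders.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.UUID;

/**
 * Sketch: trigger an ad-hoc incremental snapshot by inserting a signal row
 * into the connector's signaling table (requires the MySQL JDBC driver on
 * the classpath; table, database and credentials below are placeholders).
 */
public class SendSnapshotSignal {

    public static void main(String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
             PreparedStatement insert = connection.prepareStatement(
                     "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {

            insert.setString(1, UUID.randomUUID().toString());
            insert.setString(2, "execute-snapshot");
            // Ask the connector to incrementally snapshot a single table
            insert.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
            insert.executeUpdate();
        }
    }
}
```

    The connector picks this row up from its change stream and interleaves the snapshot chunks with ongoing streaming, which is exactly the watermarking behaviour the post describes in detail.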

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, we include mostly bugfixes. Yet this release contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, but also to the other connectors as well.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, Oracle BLOB/CLOB support is now opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors, which include some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing you to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API (see the sketch after this list)

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values
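
    As a rough sketch of the embedded engine with an SMT applied (connection values are placeholders, and threading as well as error handling are left out for brevity):

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        import java.util.Properties;

        public class EmbeddedEngineExample {
            public static void main(String[] args) throws Exception {
                Properties props = new Properties();
                props.setProperty("name", "engine");
                props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
                props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
                // placeholder connection settings
                props.setProperty("database.hostname", "localhost");
                props.setProperty("database.port", "5432");
                props.setProperty("database.user", "postgres");
                props.setProperty("database.password", "postgres");
                props.setProperty("database.dbname", "inventory");
                props.setProperty("database.server.name", "dbserver1");

                // SMTs are configured just like in Kafka Connect, via the 'transforms' namespace;
                // here the filter SMT drops delete events (illustrative condition)
                props.setProperty("transforms", "filter");
                props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
                props.setProperty("transforms.filter.language", "jsr223.groovy");
                props.setProperty("transforms.filter.condition", "value.op != 'd'");

                try (DebeziumEngine<ChangeEvent<String, String>> engine =
                        DebeziumEngine.create(Json.class)
                                .using(props)
                                .notifying(record -> System.out.println(record.value()))
                                .build()) {
                    engine.run(); // blocks; a real application would submit this to an executor
                }
            }
        }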

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    Setting up change data capture (CDC) pipelines with Debezium typically is a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

    There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode (see the example after this list), for Postgres one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.
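
    For example, a MySQL server prepared for Debezium typically carries binlog settings along these lines in its configuration file (the values shown are illustrative only):

        server-id        = 223344
        log_bin          = mysql-bin
        binlog_format    = ROW
        binlog_row_image = FULL
        expire_logs_days = 10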

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power many different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data into a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that future users can avoid them, and discuss one of the more challenging production incidents we faced and how Debezium helped ensure we could recover without any data loss. In closing, we discuss what value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
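
    As a rough sketch of what such a test set-up can look like with plain Testcontainers (image tags, aliases and environment values are assumptions; the debezium-testing-testcontainers module adds higher-level helpers on top of this):

        import org.testcontainers.containers.GenericContainer;
        import org.testcontainers.containers.KafkaContainer;
        import org.testcontainers.containers.Network;
        import org.testcontainers.containers.PostgreSQLContainer;

        public class CdcStackTest {

            // shared Docker network so that Kafka Connect can reach Kafka and Postgres
            static Network network = Network.newNetwork();

            static KafkaContainer kafka = new KafkaContainer()
                    .withNetwork(network)
                    .withNetworkAliases("kafka");

            static PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11")
                    .withNetwork(network)
                    .withNetworkAliases("postgres");

            static GenericContainer<?> connect = new GenericContainer<>("debezium/connect:1.1")
                    .withNetwork(network)
                    .withExposedPorts(8083)
                    // assumes the broker's internal listener is reachable at kafka:9092 on the network
                    .withEnv("BOOTSTRAP_SERVERS", "kafka:9092")
                    .withEnv("GROUP_ID", "1")
                    .withEnv("CONFIG_STORAGE_TOPIC", "connect_configs")
                    .withEnv("OFFSET_STORAGE_TOPIC", "connect_offsets")
                    .withEnv("STATUS_STORAGE_TOPIC", "connect_statuses")
                    .dependsOn(kafka, postgres);

            // a test would start the containers, register a connector via Connect's REST API
            // on the mapped port 8083, write to Postgres and assert on the resulting Kafka records
        }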

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    Let’s talk about TOAST. Toast? No, TOAST!

    So what’s that? TOAST (The Oversized-Attribute Storage Technique) is a mechanism in Postgres which stores large column values in multiple physical rows, circumventing the page size limit of 8 KB.

    TOAST!

    Typically, TOAST storage is transparent to the user, so you don’t really have to care about it. There’s an exception, though: if a table row has changed, any unchanged values that were stored using the TOAST mechanism are not included in the message that Debezium receives from the database, unless they are part of the table’s replica identity. Consequently, such unchanged TOAST column values will not be contained in Debezium data change events sent to Apache Kafka. In this post we’re going to discuss different strategies for dealing with this situation.
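
    One of those strategies is changing the affected table’s replica identity, so that the full old row state, including TOASTed columns, is written to the WAL; the table name below is just a placeholder:

        ALTER TABLE customers REPLICA IDENTITY FULL;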

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2; one exception is a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts to rebase the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors and in turn reduced maintenance effort for the development team going forward; but there’s one immediately tangible advantage for you, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 or higher as a service offered by various cloud providers have definitely felt the complications of deploying the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol that is available out of the box since PostgreSQL 10.
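
    Switching to pgoutput only requires setting the connector’s plugin.name property; a minimal sketch of the relevant configuration fragment (connection settings omitted):

        connector.class=io.debezium.connector.postgresql.PostgresConnector
        plugin.name=pgoutput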

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

    This is true for all connectors except the Debezium PostgreSQL connector. This connector is specific in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins:

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

    • wal2json, which is based on JSON and which is maintained by its own upstream community

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if no table at all is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended" but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, it also provides a few new features. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features, too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently, there are some more metrics for the SQL Server connector (lag behind master, number of transactions, etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941); see the configuration sketch after this list

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)
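
    A hedged configuration sketch combining these options could look as follows; the applicationName entry is just one example of a database.* property passed through to the JDBC driver, and the values are illustrative:

        connector.class=io.debezium.connector.sqlserver.SqlServerConnector
        snapshot.isolation.mode=snapshot
        decimal.handling.mode=double
        database.applicationName=debezium-sqlserver-connector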

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

    The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. The Docker images have meanwhile been uploaded and are ready for use under the tags 0.9.0.Alpha1 and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

    Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

    Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

    This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

    Now let’s take a closer look at some of the new features.

    We wish all the best to the Debezium community for 2018!

    While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. We have seen how to set up a change data stream to a downstream database a few weeks ago. In this blog post we will follow the same approach to stream the data to an Elasticsearch server to leverage its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both, a PostgreSQL database and Elasticsearch, so we will optimize access to the data via the SQL query language as well as via full-text search.

    Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues that were found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduces the risk of an internal race condition.

    Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

    Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we included a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

    We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

    Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value loss. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

    Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

    Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events, either using Kafka’s Decimal type or as double (see the example after this list).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)
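
    For instance, switching the PostgreSQL connector to the double representation is a one-line change; the fragment below omits the other required connection properties:

        connector.class=io.debezium.connector.postgresql.PostgresConnector
        decimal.handling.mode=double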

    It’s my pleasure to announce the release of Debezium 0.5.1!

    This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new data types: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0; upgrading is recommended for all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

    Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
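
    As a small illustration of chaining two of the built-in transformations, the fragment below masks a field and routes records to daily topics; the field name is purely hypothetical:

        transforms=mask,route
        transforms.mask.type=org.apache.kafka.connect.transforms.MaskField$Value
        transforms.mask.fields=ssn
        transforms.route.type=org.apache.kafka.connect.transforms.TimestampRouter
        transforms.route.topic.format=${topic}-${timestamp}
        transforms.route.timestamp.format=yyyyMMdd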

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    With the recent Debezium release, we’re happy to announce that a new PostgreSQL connector has been added alongside the already existing MySQL and MongoDB connectors.

    \ No newline at end of file diff --git a/tag/presentation/index.html b/tag/presentation/index.html index 4873d40420..5e0038c48d 100644 --- a/tag/presentation/index.html +++ b/tag/presentation/index.html @@ -1 +1 @@ - Tag: presentation

    Debezium Blog

    Last week I had the pleasure to do a webinar on change data streaming patterns for microservices with the fabulous Burr Sutter at DevNation Live.

    The recording of that 30 min session is available on YouTube now. It also contains a demo that shows how to set-up a data streaming pipeline with Debezium and Apache Kafka, running on OpenShift. The demo begins at 12 min 40 into the recording.

    Enjoy!

    Debezium’s project lead Gunnar Morling gave a few talks at the recent Devoxx Belgium 2017. One of his talks was dedicated to Debezium and change data capture in general.

    If you are interested in those topics and you want to obtain a fast and simple introduction to it, do not hesitate and watch the talk. Batteries and demo included!

    \ No newline at end of file + Tag: presentation

    \ No newline at end of file diff --git a/tag/production/index.html b/tag/production/index.html index d071121668..2644f1c4cc 100644 --- a/tag/production/index.html +++ b/tag/production/index.html @@ -7,4 +7,4 @@ num.partitions = 1 compression.type = producer log.cleanup.policy = delete -log.retention.ms = 604800000 ## 7 days

    But often, when you use Debezium and Kafka in a production environment you might choose to disable Kafka’s topic auto creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case you have to create topics for Debezium’s captured data sources upfront.
    But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated since KIP-158 is implemented to enable customizable topic creation with Kafka Connect.
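
    With Kafka Connect 2.6.0 or newer, the desired topic settings can be declared directly in the connector configuration instead; the values below are just an example:

        topic.creation.default.replication.factor=3
        topic.creation.default.partitions=10
        topic.creation.default.cleanup.policy=delete
        topic.creation.default.retention.ms=604800000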

    \ No newline at end of file +log.retention.ms = 604800000 ## 7 days

    \ No newline at end of file diff --git a/tag/quarkus/index.html b/tag/quarkus/index.html index a308de150c..0b0b4f631a 100644 --- a/tag/quarkus/index.html +++ b/tag/quarkus/index.html @@ -1 +1 @@ - Tag: quarkus

    Debezium Blog

    Kafka Streams is a library for developing stream processing applications based on Apache Kafka. Quoting its docs, "a Kafka Streams application processes record streams through a topology in real-time, processing data continuously, concurrently, and in a record-by-record manner". The Kafka Streams DSL provides a range of stream processing operations such as map, filter, join, and aggregate.

    Non-Key Joins in Kafka Streams

    Debezium’s CDC source connectors make it easy to capture data changes in databases and push them towards sink systems such as Elasticsearch in near real-time. By default, this results in a 1:1 relationship between tables in the source database, the corresponding Kafka topics, and a representation of the data at the sink side, such as a search index in Elasticsearch.

    In case of 1:n relationships, say between a table of customers and a table of addresses, consumers often are interested in a view of the data that is a single, nested data structure, e.g. a single Elasticsearch document representing a customer and all their addresses.

    This is where KIP-213 ("Kafka Improvement Proposal") and its foreign key joining capabilities come in: it was introduced in Apache Kafka 2.4 "to close the gap between the semantics of KTables in streams and tables in relational databases". Before KIP-213, in order to join messages from two Debezium change event topics, you’d typically have to manually re-key at least one of the topics, so to make sure the same key is used on both sides of the join.

    Thanks to KIP-213, this isn’t needed any longer, as it allows joining two Kafka topics on fields extracted from the Kafka message value, taking care of the required re-keying automatically, in a fully transparent way. Compared to previous approaches, this drastically reduces the effort for creating aggregated events from Debezium’s CDC events.
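
    A compact sketch of such a foreign-key join is shown below; topic names, the value classes and the serde set-up are assumptions and would normally be derived from the Debezium change event schemas:

        import org.apache.kafka.streams.StreamsBuilder;
        import org.apache.kafka.streams.kstream.KTable;

        public class CustomerAddressJoin {

            // minimal value classes standing in for the deserialized change events
            public static class Customer { public long id; public String name; }
            public static class Address  { public long id; public long customerId; public String city; }
            public static class AddressWithCustomer {
                public Address address; public Customer customer;
                public AddressWithCustomer(Address address, Customer customer) {
                    this.address = address; this.customer = customer;
                }
            }

            public static void buildTopology(StreamsBuilder builder) {
                // CDC topics read as tables (serdes would be supplied via Consumed.with(...))
                KTable<Long, Customer> customers = builder.table("dbserver1.inventory.customers");
                KTable<Long, Address> addresses = builder.table("dbserver1.inventory.addresses");

                // foreign-key join (KIP-213, Kafka 2.4+): no manual re-keying of the addresses
                // topic is needed, the extractor picks the foreign key from the message value
                KTable<Long, AddressWithCustomer> enriched = addresses.join(
                        customers,
                        address -> address.customerId,
                        (address, customer) -> new AddressWithCustomer(address, customer));

                enriched.toStream().to("customers-with-addresses");
            }
        }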

    One of the typical Debezium use cases is to use change data capture to integrate a legacy system with other systems in the organization. There are multiple ways to achieve this goal:

    • Write data to Kafka using Debezium and follow with a combination of Kafka Streams pipelines and Kafka Connect connectors to deliver the changes to other systems

    • Use Debezium Embedded engine in a Java standalone application and write the integration code using plain Java; that’s often used to send change events to alternative messaging infrastructure such as Amazon Kinesis, Google Pub/Sub etc.

    • Use an existing integration framework or service bus to express the pipeline logic

    This article is focusing on the third option - a dedicated integration framework.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    This article is a dive into the realms of Event Sourcing, Command Query Responsibility Segregation (CQRS), Change Data Capture (CDC), and the Outbox Pattern. Much needed clarity on the value of these solutions will be presented. Additionally, two differing designs will be explained in detail with the pros/cons of each.

    So why do all these solutions even matter? They matter because many teams are building microservices and distributing data across multiple data stores. One system of microservices might involve relational databases, object stores, in-memory caches, and even searchable indexes of data. Data can quickly become lost, out of sync, or even corrupted therefore resulting in disastrous consequences for mission critical systems.

    Solutions that help avoid these serious problems are of paramount importance for many organizations. Unfortunately, many vital solutions are somewhat difficult to understand; Event Sourcing, CQRS, CDC, and Outbox are no exception. Please look at these solutions as an opportunity to learn and understand how they could apply to your specific use cases.

    As you will find out at the end of this article, I will propose that three of these four solutions have high value, while the other should be discouraged except for the rarest of circumstances. The advice given in this article should be evaluated against your specific needs, because, in some cases, none of these four solutions would be a good fit.

    Outbox as in that folder in my email client? No, not exactly but there are some similarities!

    The term outbox describes a pattern that allows independent components or services to perform "read your own writes" semantics while concurrently providing a reliable, eventually consistent view to those writes across component or service boundaries.

    You can read more about the Outbox pattern and how it applies to microservices in our blog post, Reliable Microservices Data Exchange With the Outbox Pattern.

    So what exactly is an Outbox Event Router?

    In Debezium version 0.9.3.Final, we introduced a ready-to-use Single Message Transform (SMT) that builds on the Outbox pattern to propagate data change events using Debezium and Kafka. Please see the documentation for details on how to use this transformation.
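
    Configuring the transformation on a connector boils down to a fragment like the one below; the route.by.field setting is optional and is shown here with its default column name:

        transforms=outbox
        transforms.outbox.type=io.debezium.transforms.outbox.EventRouter
        transforms.outbox.route.by.field=aggregatetype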

    Last week’s announcement of Quarkus sparked a great amount of interest in the Java community: crafted from best-of-breed Java libraries and standards, it allows building Kubernetes-native applications based on GraalVM & OpenJDK HotSpot. In this blog post we are going to demonstrate how a Quarkus-based microservice can consume Debezium’s data change events via Apache Kafka. For that purpose, we’ll see what it takes to convert the shipment microservice from our recent post about the outbox pattern into a Quarkus-based service.

    \ No newline at end of file + Tag: quarkus

    \ No newline at end of file diff --git a/tag/questdb/index.html b/tag/questdb/index.html index 2805ad0448..5a33f127ba 100644 --- a/tag/questdb/index.html +++ b/tag/questdb/index.html @@ -1 +1 @@ - Tag: questdb

    Debezium Blog

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    \ No newline at end of file + Tag: questdb

    \ No newline at end of file diff --git a/tag/rds/index.html b/tag/rds/index.html index baf1525b5b..b762ad5a92 100644 --- a/tag/rds/index.html +++ b/tag/rds/index.html @@ -1 +1 @@ - Tag: rds

    Debezium Blog

    In this blog post, we are going to discuss how Delhivery, the leading supply chain services company in India, is using Debezium to power many different business use cases, ranging from driving event-driven microservices and providing data integration to moving operational data into a data warehouse for real-time analytics and reporting. We will also take a look at the early mistakes we made when integrating Debezium and how we solved them so that future users can avoid them, and discuss one of the more challenging production incidents we faced and how Debezium helped ensure we could recover without any data loss. In closing, we discuss what value Debezium has provided us, areas where we believe there is scope for improvement, and how Debezium fits into our future goals.

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

Tag: releases

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work: Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues with the 2.6.1.Final release and adds support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, a fix for Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of changes, ranging from improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes to ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for the Debezium Server Event Hubs sink, a native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with the JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes to address regressions and stability issues, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes: MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with tons of new features and bug fixes, but also with major upgrades to several core components.

The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

This release contains several new features, including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

TL;DR: Debezium is NOT affected by the recently disclosed remote code execution vulnerability in log4j2 (CVE-2021-44228). The log4j-1.2.17.jar shipped in Debezium’s container images contains a class JMSAppender, which is subject to a MODERATE vulnerability (CVE-2021-4104). This appender is NOT used by default, i.e. access to log4j’s configuration is required in order to exploit this CVE. As a measure of caution, we have decided to remove the JMSAppender class from Debezium’s container images as of version 1.7.2.Final, released today.

    On Dec 10th, a remote code execution vulnerability in the widely used log4j2 library was published (CVE-2021-44228). Debezium, just like Apache Kafka and Kafka Connect, does not use log4j2 and therefore is NOT affected by this CVE.

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

As we near the final release due out next week, this release focused heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

With the colors of autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months; For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing the customer to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:
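(As a hedged aside on how the Signal API is typically driven: with a signaling table configured for the connector, inserting a row of type execute-snapshot asks the connector to incrementally snapshot the listed tables. The table name, connection details, and JSON payload in the sketch below are assumptions to be checked against the signaling documentation for your Debezium version.)

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Triggers an incremental snapshot by inserting a signal row into the connector's
// configured signaling table. Names, credentials, and the exact JSON format are
// illustrative assumptions; consult the signaling documentation for your version.
public class TriggerIncrementalSnapshot {

    public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/inventory", "debezium", "secret");
             PreparedStatement signal = con.prepareStatement(
                 "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
            signal.setString(1, "ad-hoc-1");
            signal.setString(2, "execute-snapshot");
            signal.setString(3, "{\"data-collections\": [\"inventory.orders\"]}");
            signal.executeUpdate(); // the connector picks the signal up from the change stream
        }
    }
}
```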

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

This release adds skipped operations optimizations for SQL Server, introduces heartbeat support to the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

This release fixes a small set of issues discovered since the original release and includes a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

This release adds a brand-new component (the web-based Debezium UI), transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors which includes some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

• The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

• Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

• A new column masking mode, "consistent hashing", allowing you to anonymize column values while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.
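As a rough, hedged illustration of what such a test set-up involves, the sketch below uses plain Testcontainers to start the moving parts on a shared Docker network; Debezium’s own testing module layers a dedicated container abstraction on top of this, so the exact API, image tags, and broker wiring should be taken from its documentation rather than from this sketch.

```java
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.PostgreSQLContainer;

// Bare-bones CDC integration test environment built with core Testcontainers classes:
// Kafka, a source Postgres database, and Debezium's Kafka Connect image on one network.
// Image tags and environment values are illustrative assumptions.
public class CdcTestEnvironment {

    public static void main(String[] args) {
        Network network = Network.newNetwork();

        KafkaContainer kafka = new KafkaContainer()
                .withNetwork(network)
                .withNetworkAliases("kafka");

        PostgreSQLContainer<?> postgres = new PostgreSQLContainer<>("debezium/postgres:11")
                .withNetwork(network)
                .withNetworkAliases("postgres");

        GenericContainer<?> connect = new GenericContainer<>("debezium/connect:1.1")
                .withNetwork(network)
                .withExposedPorts(8083)
                // The in-network broker address is an assumption; verify the listener
                // host/port exposed by the Testcontainers Kafka module you use.
                .withEnv("BOOTSTRAP_SERVERS", "kafka:9092")
                .withEnv("GROUP_ID", "1")
                .withEnv("CONFIG_STORAGE_TOPIC", "connect_configs")
                .withEnv("OFFSET_STORAGE_TOPIC", "connect_offsets")
                .withEnv("STATUS_STORAGE_TOPIC", "connect_statuses");

        kafka.start();
        postgres.start();
        connect.start();

        // A test would now register a connector via Connect's REST API on
        // connect.getMappedPort(8083) and assert on records consumed from Kafka.
    }
}
```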

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

• Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

• Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

As you’d expect, there were not many changes since last week’s CR2; one exception is a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

After the CR1 release we decided to do another candidate release, as not only did a good number of bug fixes come in, but the community also provided a few very useful feature implementations which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

In addition, we finished our efforts to rebase the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

This version not only continues the incremental improvement of Debezium but also brings shiny new features.

All of you who are using PostgreSQL 10 or higher as a service offered by one of the cloud providers have definitely felt the complications of having to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports the pgoutput replication protocol (DBZ-766), which is available out of the box since PostgreSQL 10.
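As a rough illustration of how little configuration that requires, here is a hypothetical snippet; all connection values are placeholders, and the only setting that matters for this feature is plugin.name.

import java.util.Properties;

public class PgOutputConfigSketch {

    // Selecting the built-in pgoutput plug-in means no logical decoding plug-in
    // has to be installed on the database server; all other values are placeholders.
    static Properties postgresConnectorConfig() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("plugin.name", "pgoutput");
        props.setProperty("database.hostname", "my-managed-postgres.example.com");
        props.setProperty("database.port", "5432");
        props.setProperty("database.user", "debezium");
        props.setProperty("database.password", "dbz");
        props.setProperty("database.dbname", "inventory");
        props.setProperty("database.server.name", "dbserver1");
        return props;
    }
}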

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected during snapshotting now (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

A very welcome usability improvement is that the connectors now log a warning if not even a single table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended" but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now indicate which record is the last one of the snapshot phase, so that downstream consumers can react to it; a sketch of what that might look like follows below.
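For illustration only, here is a hypothetical sketch of how a downstream consumer might detect that point. It assumes JSON change events without schemas and a source.snapshot field carrying the value "last" for the final snapshot record; verify those details against the connector documentation for your version.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SnapshotCompletionDetector {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Returns true if the given change event value closes the snapshot phase.
    public static boolean isLastSnapshotRecord(String changeEventValue) throws Exception {
        JsonNode event = MAPPER.readTree(changeEventValue);
        JsonNode snapshot = event.path("source").path("snapshot");
        return "last".equals(snapshot.asText());
    }
}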

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

This is a recommended update for all users of earlier versions; besides bug fixes, it also provides a few new features. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no fewer than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

    The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will be handled correctly now using the new Antlr-based DDL parser (DBZ-860). Also the MongoDB connector saw a bug fix (DBZ-838): initial snapshots will be interrupted now if the connector is requested to stop (e.g. when shutting down Kafka Connect). More a useful improvement rather than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. The Docker images for 0.9.0.Alpha1 have meanwhile been uploaded to Docker Hub and are ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

Just a few days before Christmas we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement reducing the risk of an internal race condition.

Robert Coup has found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it runs with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we introduced a distinct plug-in decoder name, wal2json_rds, that bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.

We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, the 0.5.2 release fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

    • The MongoDB connector supports the options database.whitelist and database.blacklist now (DBZ-302)

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)

    It’s my pleasure to announce the release of Debezium 0.5.1!

This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0; upgrading is recommended for all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connector processes them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
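To make that a bit more tangible, here is a small, hypothetical sketch of such a custom SMT; the class, its prefix option and the re-routing behavior are invented for illustration, while the Transformation interface it implements is part of Kafka Connect’s public API.

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

public class TopicPrefixTransform<R extends ConnectRecord<R>> implements Transformation<R> {

    private String prefix;

    @Override
    public void configure(Map<String, ?> configs) {
        // Read the prefix from the transform's configuration, defaulting to "cdc."
        Object value = configs.get("prefix");
        prefix = value != null ? value.toString() : "cdc.";
    }

    @Override
    public R apply(R record) {
        // Re-route the record to a prefixed topic, leaving key and value untouched
        return record.newRecord(
                prefix + record.topic(),
                record.kafkaPartition(),
                record.keySchema(),
                record.key(),
                record.valueSchema(),
                record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return new ConfigDef()
                .define("prefix", ConfigDef.Type.STRING, "cdc.",
                        ConfigDef.Importance.MEDIUM, "Prefix added to the destination topic name");
    }

    @Override
    public void close() {
        // nothing to clean up
    }
}

Once packaged and put on the Kafka Connect classpath, a transform like this would be referenced from the connector configuration through the usual transforms properties, e.g. transforms=prefix and transforms.prefix.type set to the class name above.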

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector may stop without completing all updates in a transaction, and when the connector restarts it starts with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connector to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and eliminates the possibility of a poorly-timed connector crash causing the connector to only process some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.

Tag: releases

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It’s with immense joy and pleasure that I’m announcing the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment to dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like the AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, a configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions, improve stability, and update documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, a Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of the Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback on. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions, improve stability, and update documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth, starting with the breaking changes. The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements to the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility-breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release took a bit longer than our traditional three-month cadence. While we normally prefer to keep to our usual schedule, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what it means moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as the default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    TL;DR: Debezium is NOT affected by the recently disclosed remote code execution vulnerability in log4j2 (CVE-2021-44228); the log4j-1.2.17.jar shipped in Debezium’s container images contains a class JMSAppender, which is subject to a MODERATE vulnerability (CVE-2021-4104). This appender is NOT used by default, i.e. access to log4j’s configuration is required in order to exploit this CVE. As a measure of caution, we have decided to remove the JMSAppender class from Debezium’s container images as of version 1.7.2.Final, released today.

    On Dec 10th, a remote code execution vulnerability in the widely used log4j2 library was published (CVE-2021-44228). Debezium, just like Apache Kafka and Kafka Connect, does not use log4j2 and therefore is NOT affected by this CVE.

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focuses heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team continues to add support for more features, allowing users to more easily configure connectors. In this article, we’ll describe and demonstrate the UI support for automatic topic creation. Read further for more information, including a video demo!

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    The Debezium UI team is pleased to announce support for Single Message Transformations (SMTs) in the Debezium UI!

    Our goal with the Debezium graphical user interface is to allow users to set up and operate connectors more easily. To that end, we have added support for Kafka Connect single message transformations to the UI. Read further for more information, and for a video demo of the new feature!

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also, in the wider Debezium community, some exciting things happened over the last few months. For instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    We are pleased to announce the first official release of the Debezium graphical user interface!

    As announced a few months back, our team has been working on a Debezium UI proof-of-concept. The goal of the PoC was to explore ways in which a graphical UI could facilitate the getting started and operational experience of Debezium users.

    Debezium is very flexible - each connector can be configured and fine-tuned in a variety of ways. It provides metrics which give the user insight into the state of the running Debezium connectors, allowing users to safely operate CDC pipelines in huge installations with thousands of connectors. This flexibility, however, comes with a learning curve for the user to understand all of the different settings and options.

    To that end, we have produced a UI which will allow users to set up and operate connectors more easily. The UI is now available as part of the Debezium releases for our community!

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, but also to the other connectors as well.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces heartbeat support for the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.
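
    For readers wondering how the new incremental snapshots are actually kicked off, here is a minimal sketch (an illustration, not an excerpt from the release): the connector is pointed at a signaling table via its signal.data.collection property, and an ad-hoc snapshot is requested by inserting a row of type execute-snapshot into that table. The table, column, and database names below are assumptions; check the documentation of your Debezium version for the exact contract.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.PreparedStatement;

        public class TriggerIncrementalSnapshot {
            public static void main(String[] args) throws Exception {
                // Assumes the connector is configured with
                //   signal.data.collection=inventory.debezium_signal
                // and that the signaling table has the columns (id, type, data).
                try (Connection conn = DriverManager.getConnection(
                        "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                     PreparedStatement stmt = conn.prepareStatement(
                             "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                    stmt.setString(1, "ad-hoc-1");          // arbitrary unique identifier of the signal
                    stmt.setString(2, "execute-snapshot");  // signal type requesting an incremental snapshot
                    stmt.setString(3, "{\"data-collections\": [\"inventory.customers\"]}");
                    stmt.executeUpdate();                   // the connector picks this row up from the change stream
                }
            }
        }

    Because the signal travels through the database itself, it is ordered consistently with the regular change events the connector is already reading.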

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium signaling table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, along with transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions as this release includes bug fixes and enhancements to several Debezium connectors, which include some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for emitting snapshot records as create or read operations

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
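
    As a rough illustration of the add.fields/add.headers renaming mentioned in the second bullet, the sketch below collects the relevant options in a java.util.Properties object as they might appear in a connector configuration; the <field>:<new name> syntax and the chosen fields are assumptions to verify against the documentation for this release.

        import java.util.Properties;

        public class UnwrapSmtConfig {
            public static Properties smtOptions() {
                Properties props = new Properties();
                // Flatten the Debezium change event envelope down to the "after" state.
                props.setProperty("transforms", "unwrap");
                props.setProperty("transforms.unwrap.type",
                        "io.debezium.transforms.ExtractNewRecordState");
                // Copy selected envelope/source fields into the flattened record,
                // renaming them with the assumed <field>:<new name> syntax.
                props.setProperty("transforms.unwrap.add.fields",
                        "op:operation,source.ts_ms:event_timestamp");
                // Add a header carrying the originating database name, renamed as well.
                props.setProperty("transforms.unwrap.add.headers", "db:source_db");
                return props;
            }
        }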

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.
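
    The debezium-scripting module mentioned above backs the scripting-based SMTs such as the filter transformation; a minimal sketch of how that SMT is typically wired up is shown below, assuming the module and a JSR-223 language implementation (Groovy here) are on the classpath, with the option names to be confirmed against the documentation.

        import java.util.Properties;

        public class FilterSmtConfig {
            public static Properties filterOptions() {
                Properties props = new Properties();
                props.setProperty("transforms", "filter");
                props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
                // Assumed: the expression language is selected via the scripting engine name.
                props.setProperty("transforms.filter.language", "jsr223.groovy");
                // Keep only update events; everything else is dropped before reaching the topic.
                props.setProperty("transforms.filter.condition", "value.op == 'u'");
                return props;
            }
        }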

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering

    • Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.
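
    For the publication-related change above (DBZ-1813), the following sketch shows how a Postgres connector might be told to maintain a filtered publication instead of one covering all tables; the publication.autocreate.mode value and the surrounding option names are assumptions drawn from memory, so verify them against the connector documentation.

        import java.util.Properties;

        public class FilteredPublicationConfig {
            public static Properties connectorProps() {
                Properties props = new Properties();
                props.setProperty("connector.class",
                        "io.debezium.connector.postgresql.PostgresConnector");
                props.setProperty("plugin.name", "pgoutput");
                // Assumed knobs: have Debezium create and maintain a publication that only
                // covers the captured tables rather than one created FOR ALL TABLES.
                props.setProperty("publication.name", "dbz_publication");
                props.setProperty("publication.autocreate.mode", "filtered");
                // Newer releases use table.include.list; releases of this era used table.whitelist.
                props.setProperty("table.include.list", "public.orders,public.customers");
                return props;
            }
        }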

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    Core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
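
    To make the consistent-hashing masking mode from the list above a bit more concrete, here is a sketch of the assumed option shape; treat the exact property name, algorithm, and salt handling as illustrative and double-check them in the connector documentation.

        import java.util.Properties;

        public class ColumnMaskingConfig {
            public static Properties maskingOptions() {
                Properties props = new Properties();
                // Assumed shape: column.mask.hash.<algorithm>.with.salt.<salt> = <fully-qualified columns>.
                // The same input always hashes to the same output, so masked values stay
                // correlatable across events without exposing the original data.
                props.setProperty(
                        "column.mask.hash.SHA-256.with.salt.CzQMA0cB5K",
                        "inventory.orders.customerName,inventory.shipments.customerName");
                return props;
            }
        }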

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.
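
    For readers who have not used the engine before, here is a minimal sketch of consuming change events in-process through the engine API (io.debezium.engine.DebeziumEngine, which lives in the API module in current versions); the connector choice, property values, and file paths are placeholders, and the exact entry points should be checked against the release you are running.

        import java.util.Properties;
        import java.util.concurrent.ExecutorService;
        import java.util.concurrent.Executors;

        import io.debezium.engine.ChangeEvent;
        import io.debezium.engine.DebeziumEngine;
        import io.debezium.engine.format.Json;

        public class EngineExample {
            public static void main(String[] args) {
                Properties props = new Properties();
                props.setProperty("name", "engine-example");
                props.setProperty("connector.class",
                        "io.debezium.connector.postgresql.PostgresConnector");
                props.setProperty("offset.storage",
                        "org.apache.kafka.connect.storage.FileOffsetBackingStore");
                props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
                // ...plus the usual database.* connection settings for the chosen connector.

                // Build an engine that hands every change event to the consumer as JSON.
                DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                        .using(props)
                        .notifying(record -> System.out.println(record.value()))
                        .build();

                // The engine is a Runnable; run it on its own thread and close it on shutdown.
                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);
            }
        }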

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    It’s my pleasure to announce the release of Debezium 1.1.0.Beta1!

    This release adds support for transaction marker events, an incubating connector for the IBM Db2 database, as well as a wide range of bug fixes. As the 1.1 release is still under active development, we’ve backported an assorted set of bug fixes to the 1.0 branch and released Debezium 1.0.1.Final, too.

    At the time of writing this, not all connector archives have been synched to Maven Central yet; this should be the case within the next few hours.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications when you needed to deploy a logical decoding plug-in necessary to enable streaming. This is no longer necessary. Debezium now supports the pgoutput replication protocol (DBZ-766), which is available out of the box since PostgreSQL 10.
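
    Concretely, switching to the built-in plug-in is a matter of selecting it in the connector configuration; the sketch below uses option names as they existed in the 0.10 era (database.server.name has since been replaced by topic.prefix), and the connection values are placeholders.

        import java.util.Properties;

        public class PgOutputConfig {
            public static Properties connectorProps() {
                Properties props = new Properties();
                props.setProperty("connector.class",
                        "io.debezium.connector.postgresql.PostgresConnector");
                // Use the logical decoding plug-in that ships with PostgreSQL 10+,
                // so no decoderbufs/wal2json plug-in needs to be installed on the server.
                props.setProperty("plugin.name", "pgoutput");
                props.setProperty("database.hostname", "postgres.example.com"); // placeholder
                props.setProperty("database.port", "5432");
                props.setProperty("database.user", "debezium");
                props.setProperty("database.password", "dbz");
                props.setProperty("database.dbname", "inventory");
                props.setProperty("database.server.name", "inventory"); // logical name used in topic names
                return props;
            }
        }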

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble over materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are provided as well. The release contains 18 resolved issues overall.

    It’s my pleasure to announce the release of Debezium 0.9.4.Final!

    This is a drop-in replacement for earlier Debezium 0.9.x versions, containing mostly bug fixes and some improvements related to metrics. Overall, 17 issues were resolved.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there’s some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    As temperatures are cooling off, the Debezium team is getting into full swing again and we’re happy to announce the release of Debezium 0.8.3.Final!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 goes on in parallel. There are 14 fixes in this release. As in earlier 0.8.x releases, we’ve further improved the new Antlr-based DDL parser used by the MySQL connector (see DBZ-901, DBZ-903 and DBZ-910).

    The Postgres connector saw a huge improvement to its start-up time for databases with lots of custom types (DBZ-899). The user reporting this issue had nearly 200K entries in pg_catalog.pg_type, and due to an N + 1 SELECT issue within the Postgres driver itself, this caused the connector to take 24 minutes to start. By using a custom query for obtaining the type metadata, we were able to cut down this time to 5 seconds! Right now we’re working with the maintainers of the Postgres driver to get this issue fixed upstream, too.

    The Debezium team is back from summer holidays and we’re happy to announce the release of Debezium 0.8.2!

    This is a bugfix release to the current stable release line of Debezium, 0.8.x, while the work on Debezium 0.9 is continuing.

    Note: By accident the version of the release artifacts is 0.8.2 instead of 0.8.2.Final. This is not in line with our recently established convention of always letting release versions end with qualifiers such as Alpha1, Beta1, CR1 or Final. The next version in the 0.8 line will be 0.8.3.Final and we’ll improve our release pipeline to make sure that this situation doesn’t occur again.

The 0.8.2 release contains 10 fixes overall, most of them dealing with issues related to DDL parsing as done by the Debezium MySQL connector. For instance, implicit non-nullable primary key columns will now be handled correctly by the new Antlr-based DDL parser (DBZ-860). The MongoDB connector also saw a bug fix (DBZ-838): initial snapshots will now be interrupted if the connector is requested to stop (e.g. when shutting down Kafka Connect). More of a useful improvement than a bug fix is the Postgres connector’s capability to add the table, schema and database names to the source block of emitted CDC events (DBZ-866).

    Thanks a lot to community members Andrey Pustovetov, Cliff Wheadon and Ori Popowski for their contributions to this release!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. The Docker images for 0.9.0.Alpha1 have meanwhile been uploaded to Docker Hub and are ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

    I’m very happy to announce the release of Debezium 0.8.0.Final!

    The key features of Debezium 0.8 are the first work-in-progress version of our Oracle connector (based on the XStream API) and a brand-new parser for MySQL DDL statements. Besides that, there are plenty of smaller new features (e.g. propagation of default values to corresponding Connect schemas, optional propagation of source queries in CDC messages and a largely improved SMT for sinking changes from MongoDB into RDBMS) as well as lots of bug fixes (e.g. around temporal and numeric column types, large transactions with Postgres).

    Please see the previous announcements (Beta 1, CR 1) to learn about all the changes in more depth. The Final release largely resembles CR1; apart from further improvements to the Oracle connector (DBZ-792) there’s one nice addition to the MySQL connector contributed by Peter Goransson: when doing a snapshot, it will now expose information about the processed rows via JMX (DBZ-789), which is very handy when snapshotting larger tables.

    Please take a look at the change log for the complete list of changes in 0.8.0.Final and general upgrade notes.

    A fantastic Independence Day to all the Debezium users in the U.S.! But that’s not the only reason to celebrate: it’s also with great happiness that I’m announcing the release of Debezium 0.8.0.CR1!

Following our new release scheme, the focus for this candidate release of Debezium 0.8 has been to fix bugs reported for last week’s Beta release, accompanied by a small number of newly implemented features.

    Thanks a lot to everyone testing the new Antlr-based DDL parser for the MySQL connector; based on the issues you reported, we were able to fix a few bugs in it. As announced recently, for 0.8 the legacy parser will remain the default implementation, but you are strongly encouraged to test out the new one (by setting the connector option ddl.parser.mode to antlr) and report any findings you may have. We’ve planned to switch to the new implementation by default in Debezium 0.9.
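As a quick, hypothetical sketch, opting into the new parser is a matter of adding a single property to an existing MySQL connector configuration (shown here as Java Properties; the existing configuration is assumed):

```java
import java.util.Properties;

public class AntlrParserOptIn {
    // Sketch: add the opt-in flag to an otherwise unchanged MySQL connector configuration.
    static Properties withAntlrParser(Properties existingConnectorConfig) {
        Properties props = new Properties();
        props.putAll(existingConnectorConfig);
        props.setProperty("ddl.parser.mode", "antlr"); // the legacy parser remains the default in 0.8
        return props;
    }
}
```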

    It’s with great excitement that I’m announcing the release of Debezium 0.8.0.Beta1!

    This release brings many exciting new features as well as bug fixes, e.g. the first drop of our new Oracle connector, a brand new DDL parser for the MySQL connector, support for MySQL default values and the update to Apache Kafka 1.1.

Due to the large number of changes (the release contains exactly 42 issues overall), we decided to alter our versioning scheme a little bit: going forward we may do one or more Beta and CR ("candidate release") releases before doing a final one. This will allow us to get feedback from the community early on, while still completing and polishing specific features. Final (stable) releases will be named like 0.8.0.Final etc.

    It’s my pleasure to announce the release of Debezium 0.7.5!

    This is a bugfix release to the 0.7 release line, which we decided to do while working towards Debezium 0.8. Most notably it fixes an unfortunate bug introduced in 0.7.3 (DBZ-663), where the internal database history topic of the Debezium MySQL connector could be partly deleted under some specific conditions. Please see the dedicated blog post on this issue to find out whether this affects you and what you should do to prevent this issue.

    Together with this, we released a couple of other fixes and improvements. Thanks to Maciej Brynski, the performance of the logical table routing SMT has been improved significantly (DBZ-655). Another fix contributed by Maciej is for DBZ-646 which lets the MySQL connector handle CREATE TABLE statements for the TokuDB storage engine now.

    It’s my pleasure to announce the release of Debezium 0.7.4!

    Continuing the 0.7 release line, this new version brings several bug fixes and a handful of new features. We recommend this upgrade to all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.4 in order to learn about any steps potentially required for upgrading.

    I’m very happy to announce the release of Debezium 0.7.3!

This is primarily a bugfix release, but we’ve also added a handful of smaller new features. It’s a recommended upgrade for all users. When upgrading from earlier versions, please check out the release notes of all versions between the one you’re currently on and 0.7.3 in order to learn about any steps potentially required for upgrading.

    Let’s take a closer look at some of the new features.

    It’s my pleasure to announce the release of Debezium 0.7.2!

    Amongst the new features there’s support for geo-spatial types, a new snapshotting mode for recovering a lost DB history topic for the MySQL connector, and a message transformation for converting MongoDB change events into a structure which can be consumed by many more sink connectors. And of course we fixed a whole lot of bugs, too.

    Debezium 0.7.2 is a drop-in replacement for previous 0.7.x versions. When upgrading from versions earlier than 0.7.0, please check out the release notes of all 0.7.x releases to learn about any steps potentially required for upgrading.

    A big thank you goes out to our fantastic community members for their hard work on this release: Andrey Pustovetov, Denis Mikhaylov, Peter Goransson, Robert Coup, Sairam Polavarapu and Tom Bentley.

Now let’s take a closer look at some of the new features.

Just a few days before Christmas, we are releasing Debezium 0.7.1! This is a bugfix release that fixes a few annoying issues found during the first rounds of use of Debezium 0.7 by our community. All issues relate either to the newly provided wal2json support or to the improvement that reduced the risk of an internal race condition.

Robert Coup found a performance regression in situations where 0.7.0 was used with an old version of the Protobuf decoder.

Suraj Savita (and others) found an issue where our code failed to correctly detect that it was running with the Amazon RDS wal2json plug-in. We were outsmarted by the JDBC driver internals, so we have included a distinct plug-in decoder name, wal2json_rds, which bypasses the detection routine and by default expects to run against an Amazon RDS instance. This mode should be used only with RDS instances.
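
As a hedged illustration, selecting that decoder amounts to setting the plug-in name in the Postgres connector configuration; this is a sketch, not a complete configuration, and everything except the plugin.name value is omitted or assumed:

```java
import java.util.Properties;

public class Wal2JsonRdsConfigSketch {
    // Sketch: explicitly select the RDS-specific wal2json decoder (use only against Amazon RDS).
    static Properties pluginSelection() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
        props.setProperty("plugin.name", "wal2json_rds"); // bypasses the wal2json detection routine
        return props;
    }
}
```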

We have also gathered feedback from the first attempts to run with Amazon RDS and included a short section on this topic in our documentation.

    It’s not Christmas yet, but we already got a present for you: Debezium 0.7.0 is here, full of new features as well as many bug fixes! A big thank you goes out to all the community members who contributed to this release. It is very encouraging for us to see not only more and more issues and feature requests being reported, but also pull requests coming in.

Note that this release comes with a small number of changes to the default mappings for some data types. We try to avoid this sort of change as far as possible, but in some cases it is required, e.g. if the previous mapping could have caused potential value losses. Please see below for the details and also make sure to check out the full change log which describes these changes in detail.

Now let’s take a closer look at some of the new features.

    We are accelerating! Three weeks after the 0.6.1 release, the Debezium team is bringing Debezium 0.6.2 to you!

    This release revolves mostly around bug fixes, but there are a few new features, too. Let’s take a closer look at some of the changes.

    Just shy of a month after the 0.6.0 release, I’m happy to announce the release of Debezium 0.6.1!

    This release contains several bugfixes, dependency upgrades and a new option for controlling how BIGINT UNSIGNED columns are conveyed. We also expanded the set of Docker images and Docker Compose files accompanying our tutorial, so you can run it now with all the databases we support.

    Let’s take a closer look at some of the changes.

    What’s better than getting Java 9? Getting Java 9 and a new version of Debezium at the same time! So it’s with great happiness that I’m announcing the release of Debezium 0.6 today.

    I’m very happy to announce the release of Debezium 0.5.2!

Like the previous release, 0.5.2 fixes several bugs in the MySQL, Postgres and MongoDB connectors. But there are also several new features and options:

    • The decimal.handling.mode option already known from the MySQL connector is now also supported for PostgreSQL (DBZ-337). It lets you control how NUMERIC and DECIMAL columns are represented in change events (either using Kafka’s Decimal type or as double).

•     The MongoDB connector now supports the options database.whitelist and database.blacklist (DBZ-302); see the configuration sketch after this list

    • The PostgreSQL connector can deal with array-typed columns as well as with quoted identifiers for tables, schemas etc. (DBZ-297, DBZ-298)

    • The Debezium Docker images run on Red Hat’s OpenShift cloud environment (DBZ-267)
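
As a hypothetical sketch of the new MongoDB filtering options mentioned above (the host, logical name and database names are placeholders, not values from the release):

```java
import java.util.Properties;

public class MongoDbFilterConfigSketch {
    // Sketch of the database filtering options; host and logical name are placeholders.
    static Properties config() {
        Properties props = new Properties();
        props.setProperty("connector.class", "io.debezium.connector.mongodb.MongoDbConnector");
        props.setProperty("mongodb.hosts", "rs0/mongodb.example.com:27017"); // placeholder replica set
        props.setProperty("mongodb.name", "mongoserver1");                   // placeholder logical name
        // Capture only these databases (alternatively use database.blacklist to exclude some)
        props.setProperty("database.whitelist", "inventory,orders");         // placeholder list
        return props;
    }
}
```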

    It’s my pleasure to announce the release of Debezium 0.5.1!

This release fixes several bugs in the MySQL, Postgres and MongoDB connectors. There’s also support for some new datatypes: POINT on MySQL (DBZ-222) and TSTZRANGE on Postgres (DBZ-280). This release is a drop-in replacement for 0.5.0; upgrading is recommended for all users.

    Note that in the — rather unlikely — case that you happened to enable Debezium for all the system tables of MySQL, any configured table filters will be applied to these system tables now, too (DBZ-242). This may require an adjustment of your filters if you indeed wanted to capture all system tables but only selected non-system tables.

    We’re happy to announce that Debezium 0.5.0 is now available for use with Kafka Connect 0.10.2.0. This release also includes a few fixes for the MySQL connector. See the release notes for specifics on these changes, and be sure to check out the Kafka documentation for compatibility with the version of the Kafka broker that you are using.

Kafka Connect 0.10.2.0 comes with a significant new feature called Single Message Transforms, and you can now use them with Debezium connectors. SMTs allow you to modify the messages produced by Debezium connectors and any other Kafka Connect source connectors, before those messages are written to Kafka. SMTs can also be used with Kafka Connect sink connectors to modify the messages before the sink connectors process them. You can use SMTs to filter out or mask specific fields, add new fields, modify existing fields, change the topic and/or topic partition to which the messages are written, and even more. And you can even chain multiple SMTs together.

    Kafka Connect comes with a number of built-in SMTs that you can simply configure and use, but you can also create your own SMT implementations to do more complex and interesting things. For example, although Debezium connectors normally map all of the changes in each table (or collection) to separate topics, you can write a custom SMT that uses a completely different mapping between tables and topics and even add fields to message keys and/or values. Using your new SMT is also very easy - simply put it on the Kafka Connect classpath and update the connector configuration to use it.
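
As a rough, hypothetical sketch of such a custom SMT (the class name and the target.topic option are invented for illustration), a transformation that reroutes every record to a single configurable topic could look like this:

```java
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

/**
 * Illustrative custom SMT: routes every record to a single, configurable topic.
 * The class name and the "target.topic" option are made up for this sketch.
 */
public class RerouteToSingleTopic<R extends ConnectRecord<R>> implements Transformation<R> {

    private static final String TARGET_TOPIC = "target.topic";

    private static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define(TARGET_TOPIC, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH,
                    "Topic to which all records are rerouted");

    private String targetTopic;

    @Override
    public void configure(Map<String, ?> configs) {
        targetTopic = (String) configs.get(TARGET_TOPIC);
    }

    @Override
    public R apply(R record) {
        // Keep key, value and schemas as-is; only the destination topic changes
        return record.newRecord(targetTopic, record.kafkaPartition(),
                record.keySchema(), record.key(),
                record.valueSchema(), record.value(),
                record.timestamp());
    }

    @Override
    public ConfigDef config() {
        return CONFIG_DEF;
    }

    @Override
    public void close() {
        // nothing to clean up
    }
}
```

Placed on the Kafka Connect classpath, such a transform would then be referenced from the connector configuration via the usual transforms and transforms.&lt;name&gt;.type properties (the names used here are illustrative).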

    We’ve also added Debezium Docker images labelled 0.5 and latest, which we use in our tutorial.

    Thanks to Sanjay and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.4.1 is now available for use with Kafka Connect 0.10.1.1. This release includes several fixes for the MongoDB connector and MySQL connector, including improved support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Jan, Horia, David, Josh, Johan, Sanjay, Saulius, and everyone in the community for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.4.0 is now available for use with Kafka Connect 0.10.1.1. This release introduces a new PostgreSQL connector, and contains over a dozen fixes combined for the MongoDB connector and MySQL connector, including preliminary support for Amazon RDS and Amazon Aurora (MySQL compatibility). See the release notes for specifics on these changes.

    We’ve also created Debezium Docker images labelled 0.4 and latest, which we use in our tutorial.

    Thanks to Horia, Chris, Akshath, Ramesh, Matthias, Anton, Sagi, barton, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.6 is now available for use with Kafka Connect 0.10.0.1. This release contains over a dozen fixes combined for the MySQL and MongoDB connectors. See the release notes for specifics on these changes.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Farid, RenZhu, Dongjun, Anton, Chris, Dennis, Sharaf, Rodrigo, Tim, and others for their help with this release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.5 is now available for use with Kafka Connect 0.10.0.1. This release contains several fixes for the MySQL connector and adds the ability to use multi-master MySQL servers as sources. See the release notes for specifics on these changes. We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

One of the fixes is significant, and so we strongly urge all users to upgrade to this release from all earlier versions. In prior versions, the MySQL connector may stop without completing all updates in a transaction, and when the connector restarts it starts with the next transaction and therefore might fail to capture some of the change events in the earlier transaction. This release fixes this issue so that when restarting it will always pick up where it left off, even if that point is in the middle of a transaction. Note that this fix only takes effect once a connector is upgraded and restarted. See the issue for more details.

    Thanks to Akshath, Anton, Chris, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.4 is now available for use with Kafka Connect 0.10.0.1. This release contains several new features for the MySQL connector: support for MySQL’s JSON datatype, a new snapshot mode called schema_only, and JMX metrics. Also, the Debezium Docker images for Zookeeper, Kafka, and Kafka Connect have all been updated to optionally expose JMX metrics in these services. And, one backward-incompatible fix was made to the change event’s ts_sec field. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

    Thanks to Akshath, Chris, Vitalii, Dennis, Prannoy, and others for their help with the release, issues, discussions, contributions, and questions!

We’re happy to announce that Debezium 0.3.3 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector, including better handling of BIT(n) values, ENUM and SET values, and GTID sets. This release also improves the log messages output by the MySQL connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Chris, Randy, Prannoy, Umang, Horia, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.2 is now available for use with Kafka Connect 0.10.0.1. This release contains a handful of bug fixes and minor improvements for the MySQL connector and MongoDB connector. The MySQL connector better handles BIT(n) values and zero-value date and timestamp values. This release also improves the log messages output by the MySQL and MongoDB connectors to better represent the ongoing activity when consuming the changes from the source database. See the release notes for specifics.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial. We’ve also updated the tutorial to use the latest Docker installations on Linux, Windows, and OS X.

    Thanks to Akshath, Colum, Emmanuel, Konstantin, Randy, RenZhu, Umang, and others for their help with the release, issues, discussions, contributions, and questions!

    We’re happy to announce that Debezium 0.3.1 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with a handful of bug fixes and two significant but backward-compatible changes. First, the MySQL connector now supports using secure connections to MySQL, adding to the existing ability to connect securely to Kafka. Second, the MySQL connector is able to capture MySQL string values using the proper character sets so that any values stored in the database can be captured correctly in events. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images labelled 0.3 and latest, which we use in our tutorial.

Thanks to Chris, Akshath, barten, and others for their help with the release, issues, discussions, contributions, and questions!

    After a few weeks delay, Debezium 0.3.0 is now available for use with Kafka Connect 0.10.0.1. This release contains an updated MySQL connector with quite a few bug fixes, and a new MongoDB connector that captures the changes made to a MongoDB replica set or MongoDB sharded cluster. See the documentation for details about how to configure these connectors and how they work.

    We’ve also updated the Debezium Docker images (with labels 0.3 and latest) used in our tutorial.

    Thanks to Andrew, Bhupinder, Chris, David, Horia, Konstantin, Tony, and others for their help with the release, issues, discussions, contributions, and questions!

I’m happy to announce that Debezium 0.2.4 is now available for use with Kafka Connect 0.9.0.1. This release adds more verbose logging during MySQL snapshots, enables taking snapshots of very large MySQL databases, and corrects a potential exception during graceful shutdown. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to David and wangshao for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.1.

    I’m happy to announce that Debezium 0.2.3 is now available for use with Kafka Connect 0.9.0.1. This release corrects the MySQL connector behavior when working with TINYINT and SMALLINT columns or with TIME, DATE, and TIMESTAMP columns. See our release notes for details of these changes and for upgrading recommendations.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    Thanks to Chris, Christian, Laogang, and Tony for their help with the release, issues, discussions, contributions, and questions! Stay tuned for our next release, which will be 0.3 and will have a new MongoDB connector and will support Kafka Connect 0.10.0.0.

I’m happy to announce that Debezium 0.2.2 is now available. This release fixes several bugs in the MySQL connector that could produce change events with incorrect source metadata, and it eliminates the possibility of a poorly-timed connector crash causing the connector to process only some of the rows in a multi-row MySQL event. See our release notes for details of these changes and for upgrading recommendations.

    Also, thanks to a community member for reporting that Debezium 0.2.x can only be used with Kafka Connect 0.9.0.1. Debezium 0.2.x cannot be used with Kafka Connect 0.10.0.0 because of its backward incompatible changes to the consumer API. Our next release of Debezium will support Kafka 0.10.x.

    We’ve also updated the Debezium Docker images (with label 0.2 and latest) used in our tutorial.

    I’m happy to announce that Debezium 0.2.1 is now available. The MySQL connector has been significantly improved and is now able to monitor and produce change events for HA MySQL clusters using GTIDs, perform a consistent snapshot when starting up the first time, and has a completely redesigned event message structure that provides a ton more information with every event. Our change log has all the details about bugs, enhancements, new features, and backward compatibility notices. We’ve also updated our tutorial.

    Debezium is a distributed platform that turns your existing databases into event streams, so applications can see and respond almost instantly to each committed row-level change in the databases. Debezium is built on top of Kafka and provides Kafka Connect compatible connectors that monitor specific database management systems. Debezium records the history of data changes in Kafka logs, so your application can be stopped and restarted at any time and can easily consume all of the events it missed while it was not running, ensuring that all events are processed correctly and completely. Debezium is open source under the Apache License, Version 2.0.

    Now the good news — Debezium 0.1 is now available and includes several significant features:

    • A connector for MySQL to monitor MySQL databases. It’s a Kafka Connect source connector, so simply install it into a Kafka Connect service (see below) and use the service’s REST API to configure and manage connectors to each DBMS server. The connector reads the MySQL binlog and generates data change events for every committed row-level modification in the monitored databases. The MySQL connector generates events based upon the tables' structure at the time the row is changed, and it automatically handles changes to the table structures.

    • A small library so applications can embed any Kafka Connect connector and consume data change events read directly from the source system. This provides a much lighter weight system (since Zookeeper, Kafka, and Kafka Connect services are not needed), but as a consequence is not as fault tolerant or reliable since the application must maintain state normally kept inside Kafka’s distributed and replicated logs. Thus the application becomes completely responsible for managing all state.

    \ No newline at end of file diff --git a/tag/schema/index.html b/tag/schema/index.html index 3b0c8b8d41..ea9ceee794 100644 --- a/tag/schema/index.html +++ b/tag/schema/index.html @@ -1 +1 @@ - Tag: schema

    Debezium Blog

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    \ No newline at end of file + Tag: schema

    Debezium Blog

    Change events streamed from a database by Debezium are (in developer parlance) strongly typed. This means that event consumers should be aware of the types of data conveyed in the events. This problem of passing along message type data can be solved in multiple ways:

    \ No newline at end of file diff --git a/tag/scylla/index.html b/tag/scylla/index.html index f1d5b9983d..ed3d17441d 100644 --- a/tag/scylla/index.html +++ b/tag/scylla/index.html @@ -1 +1 @@ - Tag: scylla

    Debezium Blog

At ScyllaDB, we develop a high-performance NoSQL database Scylla, API-compatible with Apache Cassandra, Amazon DynamoDB and Redis. Earlier this year, we introduced support for Change Data Capture in Scylla 4.3. This new feature seemed like a perfect match for integration with the Apache Kafka ecosystem, so we developed the Scylla CDC Source Connector using the Debezium framework. In this blog post we will cover the basic structure of Scylla’s CDC, the reasons we chose the Debezium framework and the design decisions we made.

    \ No newline at end of file + Tag: scylla

    Debezium Blog

At ScyllaDB, we develop a high-performance NoSQL database Scylla, API-compatible with Apache Cassandra, Amazon DynamoDB and Redis. Earlier this year, we introduced support for Change Data Capture in Scylla 4.3. This new feature seemed like a perfect match for integration with the Apache Kafka ecosystem, so we developed the Scylla CDC Source Connector using the Debezium framework. In this blog post we will cover the basic structure of Scylla’s CDC, the reasons we chose the Debezium framework and the design decisions we made.

    \ No newline at end of file diff --git a/tag/secrets/index.html b/tag/secrets/index.html index 698cdff95c..ac0f92959b 100644 --- a/tag/secrets/index.html +++ b/tag/secrets/index.html @@ -1 +1 @@ - Tag: secrets

    Debezium Blog

    When a Debezium connector is deployed to a Kafka Connect instance it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

Let’s recall what a connector registration request looks like for the MySQL Debezium connector:

    \ No newline at end of file + Tag: secrets

    Debezium Blog

    When a Debezium connector is deployed to a Kafka Connect instance it is sometimes necessary to keep database credentials hidden from other users of the Connect API.

Let’s recall what a connector registration request looks like for the MySQL Debezium connector:

    \ No newline at end of file diff --git a/tag/sentry/index.html b/tag/sentry/index.html index 15c5f8e3df..6e1a012aeb 100644 --- a/tag/sentry/index.html +++ b/tag/sentry/index.html @@ -1 +1 @@ - Tag: sentry

    Debezium Blog

    Debezium has received a huge improvement to the structure of its container images recently, making it extremely simple to extend its behaviour.

This is a small tutorial showing how you can, for instance, add Sentry, "an open-source error tracking [software] that helps developers monitor and fix crashes in real time". Here we’ll use it to collect and report any exceptions from Kafka Connect and its connectors. Note that this is only applicable for Debezium 0.9+.

    We need a few things to have Sentry working, and we’ll add all of them and later have a Dockerfile which gets it all glued correctly:

    • Configure Log4j

    • SSL certificate for sentry.io, since it’s not by default in the JVM trusted chain

    • The sentry and sentry-log4j libraries

    \ No newline at end of file + Tag: sentry

    Debezium Blog

    Debezium has received a huge improvement to the structure of its container images recently, making it extremely simple to extend its behaviour.

This is a small tutorial showing how you can, for instance, add Sentry, "an open-source error tracking [software] that helps developers monitor and fix crashes in real time". Here we’ll use it to collect and report any exceptions from Kafka Connect and its connectors. Note that this is only applicable for Debezium 0.9+.

    We need a few things to have Sentry working, and we’ll add all of them and later have a Dockerfile which gets it all glued correctly:

    • Configure Log4j

    • SSL certificate for sentry.io, since it’s not by default in the JVM trusted chain

    • The sentry and sentry-log4j libraries

    \ No newline at end of file diff --git a/tag/serialization/index.html b/tag/serialization/index.html index 75ec1367a7..1482110643 100644 --- a/tag/serialization/index.html +++ b/tag/serialization/index.html @@ -1 +1 @@ - Tag: serialization

    Debezium Blog

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

    Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.

    \ No newline at end of file + Tag: serialization

    Debezium Blog

    Although Debezium makes it easy to capture database changes and record them in Kafka, one of the more important decisions you have to make is how those change events will be serialized in Kafka. Every message in Kafka has a key and a value, and to Kafka these are opaque byte arrays. But when you set up Kafka Connect, you have to say how the Debezium event keys and values should be serialized to a binary form, and your consumers will also have to deserialize them back into a usable form.

    Debezium event keys and values are both structured, so JSON is certainly a reasonable option — it’s flexible, ubiquitous, and language agnostic, but on the other hand it’s quite verbose. One alternative is Avro, which is also flexible and language agnostic, but also faster and results in smaller binary representations. Using Avro requires a bit more setup effort on your part and some additional software, but the advantages are often worth it.

    \ No newline at end of file diff --git a/tag/signaling/index.html b/tag/signaling/index.html index 4c0d81ab9b..6d20f05949 100644 --- a/tag/signaling/index.html +++ b/tag/signaling/index.html @@ -1 +1 @@ - Tag: signaling

    Debezium Blog

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    \ No newline at end of file + Tag: signaling

    Debezium Blog

    Welcome to the third installment of our series on Debezium Signaling and Notifications. In this article, we continue our exploration of Debezium signaling and notifications. In particular, we will delve into how to enable and manage these features using the JMX channel.

    We will also explore how to send signals and get notifications through the REST API leveraging Jolokia.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the second installment in the series, where we will discuss how to customize the signal and notification channels in Debezium.

    Debezium 2.3 introduced new improvements in signaling and notification capabilities. You can set up new signals and notification channels in addition to the pre-defined signals and notification channels offered by Debezium. This feature enables users to customize the system to suit their unique needs and combine it with their existing infrastructure or third-party solutions. It enables effective monitoring and a proactive response to data changes by precisely capturing and communicating signal events and triggering notifications through preferred channels.

    The first article in this series, Signaling and Notifications in Debezium, provides an overview of the signaling and notification features in Debezium. It also discusses the available channels & their use cases for various scenarios.

    Welcome to this series of articles dedicated to signaling and notifications in Debezium! This post serves as the first installment in the series, where we will introduce the signaling and notification features offered by Debezium and discuss the available channels for interacting with the platform.

    In the subsequent parts of this series, we will delve deeper into customizing signaling channels and explore additional topics such as JMX signaling and notifications.

    \ No newline at end of file diff --git a/tag/smt/index.html b/tag/smt/index.html index 97eb50efe6..542457b4ef 100644 --- a/tag/smt/index.html +++ b/tag/smt/index.html @@ -1 +1 @@ - Tag: smt

    Debezium Blog

    We wish all the best to the Debezium community for 2018!

While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server, leveraging its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we will optimize access to the data via the SQL query language as well as via full-text search.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.

    \ No newline at end of file + Tag: smt

    Debezium Blog

    We wish all the best to the Debezium community for 2018!

While we’re working on the 0.7.2 release, we thought we’d publish another post describing an end-to-end data streaming use case based on Debezium. A few weeks ago we saw how to set up a change data stream to a downstream database. In this blog post we will follow the same approach to stream the data to an Elasticsearch server, leveraging its excellent capabilities for full-text search on our data. But to make the matter a little bit more interesting, we will stream the data to both a PostgreSQL database and Elasticsearch, so we will optimize access to the data via the SQL query language as well as via full-text search.

    In this blog post we will create a simple streaming data pipeline to continuously capture the changes in a MySQL database and replicate them in near real-time into a PostgreSQL database. We’ll show how to do this without writing any code, but instead by using and configuring Kafka Connect, the Debezium MySQL source connector, the Confluent JDBC sink connector, and a few single message transforms (SMTs).

    This approach of replicating data through Kafka is really useful on its own, but it becomes even more advantageous when we can combine our near real-time streams of data changes with other streams, connectors, and stream processing applications. A recent Confluent blog post series shows a similar streaming data pipeline but using different connectors and SMTs. What’s great about Kafka Connect is that you can mix and match connectors to move data between multiple systems.

    We will also demonstrate a new functionality that was released with Debezium 0.6.0: a single message transform for CDC Event Flattening.

    \ No newline at end of file diff --git a/tag/snapshots/index.html b/tag/snapshots/index.html index 34b85add0d..461e23310e 100644 --- a/tag/snapshots/index.html +++ b/tag/snapshots/index.html @@ -1 +1 @@ - Tag: snapshots

    Debezium Blog

    The engineering team at Shopify recently improved the Debezium MySQL connector so that it supports incremental snapshotting for databases without write access by the connector, which is required when pointing Debezium to read-only replicas. In addition, the Debezium MySQL connector now also allows schema changes during an incremental snapshot. This blog post explains the implementation details of those features.

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.

    \ No newline at end of file + Tag: snapshots

    Debezium Blog

    The engineering team at Shopify recently improved the Debezium MySQL connector so that it supports incremental snapshotting for databases without write access by the connector, which is required when pointing Debezium to read-only replicas. In addition, the Debezium MySQL connector now also allows schema changes during an incremental snapshot. This blog post explains the implementation details of those features.

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.

    \ No newline at end of file diff --git a/tag/spanner/index.html b/tag/spanner/index.html index d84c200a44..353b078719 100644 --- a/tag/spanner/index.html +++ b/tag/spanner/index.html @@ -1 +1 @@ - Tag: spanner

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure to announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

As the summer temperatures continue to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

As the team has sprung forward into action, with spring upon us and the summer spirit in the air, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes to ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features, understand how to use these to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, a preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bug fixes, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several noteworthy new features, including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of the Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth.

The team has been quite busy these last couple of months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived: Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bug fixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes but also major upgrades to several core components.

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    \ No newline at end of file + Tag: spanner

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues in the 2.6.1.Final release, and it also brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, a fix for Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in depth…

As the summer temperature continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues in the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stopping its capture of changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team having sprung forward into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

As we’ve hit the midpoint of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…

While we remain on track with the upcoming Debezium 2.6 release, we continuously look for improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

As the year draws to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bug fixes, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several noteworthy new features, including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of the Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth.

The team has been quite busy these last couple of months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived: Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bug fixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes but also major upgrades to several core components.

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    \ No newline at end of file diff --git a/tag/spark/index.html b/tag/spark/index.html index b9b5df3e07..b177172629 100644 --- a/tag/spark/index.html +++ b/tag/spark/index.html @@ -1 +1 @@ - Tag: spark

    Debezium Blog

From the very beginning of the project, Debezium has provided a way to run connectors directly embedded in an application. How this is provided has changed over time and continues to evolve. This article describes another step in that evolution - a new implementation of the Debezium engine.

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will take it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool, as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.

    \ No newline at end of file + Tag: spark

    Debezium Blog

From the very beginning of the project, Debezium has provided a way to run connectors directly embedded in an application. How this is provided has changed over time and continues to evolve. This article describes another step in that evolution - a new implementation of the Debezium engine.
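For readers who want to see what running a connector embedded in an application looks like, here is a minimal sketch using the DebeziumEngine API. It assumes a PostgreSQL connector and file-based offset storage; the connector class, file path, and timing below are illustrative placeholders, not a complete configuration.

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class EmbeddedEngineSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative configuration only: connector class, offset file and the
            // (omitted) connection settings are placeholders, not a full config.
            Properties props = new Properties();
            props.setProperty("name", "embedded-engine-example");
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
            props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
            props.setProperty("offset.flush.interval.ms", "60000");
            // database.hostname, database.user, topic.prefix, ... would follow here

            // The engine pushes change events, serialized as JSON, to the callback.
            try (DebeziumEngine<ChangeEvent<String, String>> engine =
                    DebeziumEngine.create(Json.class)
                            .using(props)
                            .notifying(record -> System.out.println(record.value()))
                            .build()) {
                ExecutorService executor = Executors.newSingleThreadExecutor();
                executor.execute(engine);      // the engine runs on its own thread
                TimeUnit.MINUTES.sleep(1);     // capture changes for a while...
                executor.shutdown();
            }                                  // close() stops the engine
        }
    }

In a real application the callback passed to notifying() would hand each event to your own processing logic rather than print it.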

In the previous blog post, we showed how to leverage Debezium to train a neural-network model with the existing data from the database and use this pre-trained model to classify images newly stored in the database. In this blog post, we will take it one step further - we will use Debezium to create multiple data streams from the database and use one of the streams for continuous learning to improve our model, and the second one for making predictions on the data. When the model is constantly improved or adjusted to recent data samples, this approach is known as online machine learning. Online learning is only suitable for some use cases, and implementing an online variant of a given algorithm may be challenging or even impossible. However, in situations where online learning is possible, it becomes a very powerful tool, as it allows one to react to changes in the data in real time and avoids the need to re-train and re-deploy new models, thus saving hardware and operational costs. As streams of data become more and more common, e.g. with the advent of IoT, we can expect online learning to become more and more popular. It’s usually a perfect fit for analyzing streaming data in use cases where it’s possible.
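To make the online-learning idea more concrete, the following self-contained sketch (deliberately much simpler than the pipeline described in the post) shows a mistake-driven perceptron that folds in one labeled event at a time from the learning stream, while the prediction stream only queries the current model; all class and method names are hypothetical.

    /**
     * Conceptual sketch only: the model is adjusted per event instead of being
     * re-trained in batch, which is what makes a change-event stream a natural
     * fit for online learning.
     */
    public class OnlinePerceptronSketch {
        private final double[] weights;
        private double bias;
        private final double learningRate;

        OnlinePerceptronSketch(int featureCount, double learningRate) {
            this.weights = new double[featureCount];
            this.learningRate = learningRate;
        }

        /** Prediction stream: score a feature vector against the current model. */
        int predict(double[] x) {
            double sum = bias;
            for (int i = 0; i < x.length; i++) {
                sum += weights[i] * x[i];
            }
            return sum >= 0 ? 1 : -1;
        }

        /** Learning stream: fold one labeled event into the model, no retraining. */
        void learn(double[] x, int label) {
            if (predict(x) != label) {               // update only on mistakes
                for (int i = 0; i < x.length; i++) {
                    weights[i] += learningRate * label * x[i];
                }
                bias += learningRate * label;
            }
        }

        public static void main(String[] args) {
            OnlinePerceptronSketch model = new OnlinePerceptronSketch(2, 0.1);
            // In practice each call would be driven by a Debezium change event.
            model.learn(new double[] {0.0, 1.0}, -1);
            model.learn(new double[] {1.0, 0.0}, 1);
            System.out.println(model.predict(new double[] {0.9, 0.1}));  // prints 1
        }
    }

The shape of the update loop is the whole point: every incoming event nudges the model immediately, so there is no separate re-train and re-deploy cycle.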

    \ No newline at end of file diff --git a/tag/sql/index.html b/tag/sql/index.html index acaf489323..19c59f037c 100644 --- a/tag/sql/index.html +++ b/tag/sql/index.html @@ -1 +1 @@ - Tag: sql

    Debezium Blog

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just be automating the running of all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

Kubernetes is written in Go and is quickly becoming the de facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.

Parsing the DDL of MySQL or any other major relational database can seem a daunting task. Usually each DBMS has a highly customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.

    \ No newline at end of file + Tag: sql

    Debezium Blog

    Update (Oct. 11 2019): An alternative, and much simpler, approach for running Debezium (and Apache Kafka and Kafka Connect in general) on Kubernetes is to use a K8s operator such as Strimzi. You can find instructions for the set-up of Debezium on OpenShift here, and similar steps apply for plain Kubernetes.

Our Debezium Tutorial walks you step by step through using Debezium by installing, starting, and linking together all of the Docker containers running on a single host machine. Of course, you can use things like Docker Compose or your own scripts to make this easier, although that would just be automating the running of all the containers on a single machine. What you really want is to run the containers on a cluster of machines. In this blog, we’ll run Debezium using a container cluster manager from Red Hat and Google called Kubernetes.

    Kubernetes is a container (Docker/Rocket/Hyper.sh) cluster management tool. Like many other popular cluster management and compute resource scheduling platforms, Kubernetes' roots are in Google, who is no stranger to running containers at scale. They start, stop, and cluster 2 billion containers per week and they contributed a lot of the Linux kernel underpinnings that make containers possible. One of their famous papers talks about an internal cluster manager named Borg. With Kubernetes, Google got tired of everyone implementing their papers in Java so they decided to implement this one themselves :)

Kubernetes is written in Go and is quickly becoming the de facto API for scheduling, managing, and clustering containers at scale. This blog isn’t intended to be a primer on Kubernetes, so we recommend heading over to the Getting Started docs to learn more about Kubernetes.

    When our MySQL connector is reading the binlog of a MySQL server or cluster, it parses the DDL statements in the log and builds an in-memory model of each table’s schema as it evolves over time. This process is important because the connector generates events for each table using the definition of the table at the time of each event. We can’t use the database’s current schema, since it may have changed since the point in time (or position in the log) where the connector is reading.

Parsing the DDL of MySQL or any other major relational database can seem a daunting task. Usually each DBMS has a highly customized SQL grammar, and although the data manipulation language (DML) statements are often fairly close to the standards, the data definition language (DDL) statements are usually less so and involve more DBMS-specific features.

    So given this, why did we write our own DDL parser for MySQL? Let’s first look at what Debezium needs a DDL parser to do.
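As a rough illustration of that in-memory model - and emphatically not Debezium’s actual parser API - the sketch below replays simplified DDL statements in log order and keeps a per-table list of columns, so a change event can be interpreted against the schema that was in effect at that position in the log; the statement format and all names here are hypothetical.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    /**
     * Hypothetical sketch: replay simplified DDL in log order to keep an
     * in-memory model of each table's columns, so change events can be decoded
     * against the schema in effect at that point in the log.
     */
    public class SchemaHistorySketch {
        private final Map<String, List<String>> tables = new HashMap<>();

        /** Apply one simplified DDL statement as it is read from the binlog. */
        void applyDdl(String ddl) {
            String[] p = ddl.trim().split("\\s+");
            switch (p[0].toUpperCase()) {
                case "CREATE":   // e.g. "CREATE TABLE customers id name email"
                    tables.put(p[2], new ArrayList<>(List.of(p).subList(3, p.length)));
                    break;
                case "ALTER":    // e.g. "ALTER TABLE customers ADD phone"
                    tables.computeIfAbsent(p[2], t -> new ArrayList<>()).add(p[4]);
                    break;
                case "DROP":     // e.g. "DROP TABLE customers"
                    tables.remove(p[2]);
                    break;
                default:         // other statements do not change the model
                    break;
            }
        }

        /** The column list a change event for this table should be decoded with. */
        List<String> columnsOf(String table) {
            return tables.getOrDefault(table, List.of());
        }

        public static void main(String[] args) {
            SchemaHistorySketch history = new SchemaHistorySketch();
            history.applyDdl("CREATE TABLE customers id name email");
            history.applyDdl("ALTER TABLE customers ADD phone");
            System.out.println(history.columnsOf("customers")); // [id, name, email, phone]
        }
    }

A real parser must of course cope with the full MySQL DDL grammar, quoted identifiers, column types, defaults, and so on, which is exactly why the connector needs a dedicated DDL parser.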

    \ No newline at end of file diff --git a/tag/sqlserver/index.html b/tag/sqlserver/index.html index 1b657f08b3..8b216ca16a 100644 --- a/tag/sqlserver/index.html +++ b/tag/sqlserver/index.html @@ -1 +1 @@ - Tag: sqlserver

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

It is with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

Even as the summer heat continues to rise, the Debezium team has some cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release focuses on addressing several critical stability issues in the 2.6.1.Final release, and it also brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, a fix for Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in depth…

As the summer temperature continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, ranging from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release focuses on addressing several critical stability issues in the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stopping its capture of changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

With spring upon us and the team having sprung forward into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: the team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

As we’ve hit the midpoint of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…

While we remain on track with the upcoming Debezium 2.6 release, we continuously look for improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements, such as an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

As the year draws to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

This release includes a variety of improvements, including batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few last-minute additions that we should highlight, so let’s dive right in, shall...

It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

While beta releases typically focus on stability and bug fixes, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

While this release focuses on stability and bug fixes, there are several noteworthy new features, including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of the Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

This release includes several bug fixes that address regressions, improve stability, and update the documentation. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look at the regressions and bug fixes.

It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth.

The team has been quite busy these last couple of months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived: Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bug fixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the use of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what it means moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server, to name just a few.

    Let’s take a moment and dive into what’s new!
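
    As a rough illustration of the new JDBC sink connector mentioned above, a worker configuration might look roughly like the sketch below. This is only a hedged sketch: the property names reflect the Debezium JDBC sink as I understand it, and the connector name, topic, PostgreSQL URL, and credentials are made-up placeholders, so verify every key against the documentation for your version.

        name=orders-jdbc-sink
        connector.class=io.debezium.connector.jdbc.JdbcSinkConnector
        topics=server1.inventory.orders
        connection.url=jdbc:postgresql://localhost:5432/reporting
        connection.username=postgres
        connection.password=postgres
        # write rows as upserts, keyed by the Kafka record key
        insert.mode=upsert
        primary.key.mode=record_key
        # let the sink create and evolve the target table schema
        schema.evolution=basic

    Registered against a Kafka Connect cluster that has the Debezium JDBC sink installed, a configuration along these lines would stream change events from the given topic into a relational table.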

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability; and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed recently that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability; and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes, such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability; and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet this release also includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. Debezium 1.8.0.Alpha1 comes with quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
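
    To make the mechanism a bit more concrete: once a connector is pointed at a signalling table (for example via a property such as signal.data.collection=inventory.debezium_signal, where the schema and table names here are placeholders of mine), an ad-hoc incremental snapshot is requested by inserting a signal row. The SQL below is only a hedged sketch; check the incremental snapshotting documentation for the exact column layout and payload expected by your connector version.

        -- request an ad-hoc incremental snapshot of the inventory.customers table
        INSERT INTO inventory.debezium_signal (id, type, data)
        VALUES ('adhoc-1', 'execute-snapshot', '{"data-collections": ["inventory.customers"]}');

    The connector picks up the inserted row, splits the named table into primary-key ordered chunks, and interleaves those chunks with the ongoing stream of changes, which is what allows the snapshot to run without pausing streaming.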

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also, in the wider Debezium community, some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release mostly includes bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, but also to the other connectors as well.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, Oracle BLOB/CLOB support is now opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, which include some of the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
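
    As a hedged sketch of the field-renaming capability above, the ExtractNewRecordState SMT can be configured roughly as follows; the transform alias and the renamed field and header names are placeholders of my own, so verify the exact syntax against the SMT documentation for your release.

        transforms=unwrap
        transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
        # add metadata fields to the flattened record, renaming them with the field:newName syntax
        transforms.unwrap.add.fields=op:operation,source.ts_ms:event_timestamp
        # expose the originating database name as a message header called "db"
        transforms.unwrap.add.headers=source.db:db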

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium Oracle connector’s LogMiner adapter were fixed, thanks to the continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.
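    Since the renamed filter options are a configuration-level change, a hedged before/after sketch may help when planning the upgrade; the database, table, and column names below are placeholders, and the deprecated options reportedly keep working for a transition period, so consult the 1.3 release notes for the authoritative mapping.

        # before (deprecated naming)
        database.whitelist=inventory
        table.whitelist=inventory.customers,inventory.orders
        column.blacklist=inventory.customers.ssn

        # after (new naming introduced with Debezium 1.3)
        database.include.list=inventory
        table.include.list=inventory.customers,inventory.orders
        column.exclude.list=inventory.customers.ssn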

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is fixed now when using the exported snapshotting mode; this mode should preferably be used, and for Debezium 1.3 we’re planning for this to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector now supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode, "consistent hashing", allowing column values to be anonymized while still keeping them correlatable (see the configuration sketch after this list)

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector
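
    For the consistent-hashing masking mode mentioned above, the configuration is expressed through a column mask property; the line below is only my reading of the property naming scheme, with a placeholder salt, algorithm, and column list, so double-check the connector documentation before relying on it.

        # hash the listed columns with SHA-256 and the given salt, so equal inputs map to equal hashes
        column.mask.hash.SHA-256.with.salt.CzQMA0cB5K=inventory.customers.email,inventory.customers.phone

    Because the same salt and algorithm always produce the same hash for a given value, downstream consumers can still join or correlate records on the masked column without ever seeing the original data.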

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.
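
    To give a feel for how Debezium Server is wired up, here is a hedged sketch of an application.properties file pairing a Postgres source with a Kinesis sink; the debezium.source.* / debezium.sink.* prefixes follow the convention as I understand it, while the hostnames, credentials, server name, region, and offset file path are invented placeholders.

        # sink side: where change events are sent
        debezium.sink.type=kinesis
        debezium.sink.kinesis.region=eu-central-1

        # source side: a regular Debezium connector configuration, prefixed with debezium.source.
        debezium.source.connector.class=io.debezium.connector.postgresql.PostgresConnector
        debezium.source.database.hostname=localhost
        debezium.source.database.port=5432
        debezium.source.database.user=postgres
        debezium.source.database.password=postgres
        debezium.source.database.dbname=inventory
        debezium.source.database.server.name=tutorial
        debezium.source.offset.storage.file.filename=data/offsets.dat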

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API

    • A new SMT for filtering out change events using scripting languages (see the configuration sketch after this list)

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values
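
    As a hedged sketch of the scripting-based filtering SMT referenced above, a Kafka Connect configuration could look roughly like this; the transform alias, the Groovy condition, and the table name are placeholders of mine, and the Groovy (JSR 223) language binding has to be on the Connect classpath for the expression to be evaluated.

        transforms=filter
        transforms.filter.type=io.debezium.transforms.Filter
        # evaluate a Groovy expression against each change event
        transforms.filter.language=jsr223.groovy
        # only keep update events on the "orders" table; everything else is dropped
        transforms.filter.condition=value.op == 'u' && value.source.table == 'orders'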

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498)

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as there was not only a good number of bug fixes coming in, but also a few very useful feature implementations were provided by the community, which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts to rebase the existing Postgres connector onto the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports (DBZ-766) the pgoutput replication protocol, which is available out of the box since PostgreSQL 10.
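
    In practice, opting into pgoutput is a single connector property; the sketch below is a hedged, minimal Postgres connector configuration in which the host, credentials, database, and logical server name are placeholder values.

        connector.class=io.debezium.connector.postgresql.PostgresConnector
        # use the logical decoding plug-in built into PostgreSQL 10+, no server-side install needed
        plugin.name=pgoutput
        database.hostname=localhost
        database.port=5432
        database.user=postgres
        database.password=postgres
        database.dbname=inventory
        database.server.name=pg-inventory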

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also, the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335), and the connector won’t stumble upon materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase, so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, a few new features are also provided. The release contains 18 resolved issues overall.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently, there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, not less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964)
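
    To illustrate the two options in the last item above, a SQL Server connector configuration fragment might look like the following hedged sketch; the driver property used for the pass-through (applicationName) is just an example of a property the SQL Server JDBC driver understands, not something taken from the post.

        # emit DECIMAL/NUMERIC values as strings instead of the precise binary representation
        decimal.handling.mode=string
        # any database.* property not consumed by Debezium itself is passed through to the JDBC driver
        database.applicationName=debezium-sqlserver-connector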

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. The Docker images have since been uploaded and are ready for use under the 0.9.0.Alpha1 tag and the rolling 0.9 tag.

    Tag: sqlserver


    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release addresses several critical stability issues found in the 2.6.1.Final release, adds support for Oracle database query filtering with more than one thousand tables, fixes a race condition with PostgreSQL offset flushing, fixes Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements, from various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This first maintenance release addresses several critical stability issues found in the 2.6.0.Final release, including classpath loading problems with Debezium Server, the MongoDB connector silently stopping to gather changes, and a race condition in the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes, a brand-new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes: The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle: In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes to ensure that older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like the AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new and exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While typically beta releases focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes addressing regressions and stability, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes addressing regressions and stability, along with documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes: MongoDB: The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    Many of you may have noticed that this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in Debezium engine

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and everyone who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet this release includes incremental snapshot support for MongoDB! Overall, not less than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, not less than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements but most notably is the new native MongoDB 4.0 change streams support!

    One of the major improvements in Debezium starting in version 1.6 is support for incremental snapshots. In this blog post we are going to explain the motivation for this feature, we will do a deep dive into the implementation details, and we will also show a demo of it.
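    As a quick orientation before that deep dive: once a connector is configured with a signaling table (via the signal.data.collection property), an ad-hoc incremental snapshot is requested by inserting a signal row into that table. The following is a minimal, hypothetical sketch, assuming a PostgreSQL source, a signaling table named debezium_signal, and a captured table public.customers:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class TriggerAdHocSnapshot {
        public static void main(String[] args) throws Exception {
            // Hypothetical connection details; the table must match the connector's
            // signal.data.collection setting and have the id/type/data columns.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost:5432/inventory", "postgres", "postgres");
                 PreparedStatement stmt = conn.prepareStatement(
                         "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                stmt.setString(1, "ad-hoc-1");          // arbitrary unique signal id
                stmt.setString(2, "execute-snapshot");  // signal type for ad-hoc incremental snapshots
                stmt.setString(3, "{\"data-collections\": [\"public.customers\"]}");
                stmt.executeUpdate();
            }
        }
    }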

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also in the wider Debezium community some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    We are very happy to announce the release of Debezium 1.7.0.CR2!

    As we move ahead towards the final release, this release includes mostly bugfixes. Yet it also contains important performance improvements and a new feature for read-only MySQL incremental snapshots.

    I am very happy to announce the release of Debezium 1.7.0.CR1!

    For this release, we’ve reworked how column filters are handled during snapshotting, the Debezium container images have been updated to use Fedora 34 as their base, there’s support for MySQL INVISIBLE columns, and much more.

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, but to the other connectors as well.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed with tons of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    It’s my pleasure to announce the release of Debezium 1.6.0.CR1!

    This release adds skipped operations optimizations for SQL Server, introduces Heartbeat support to the Oracle connector, makes Oracle BLOB/CLOB support opt-in only, and provides a range of bug fixes and other improvements across different Debezium connectors.
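    For readers upgrading the Oracle connector, the opt-in mentioned above boils down to a single property. A minimal sketch, assuming the lob.enabled connector option (all other settings omitted):

    import java.util.Properties;

    public class OracleLobOptIn {
        public static Properties lobProperties() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.oracle.OracleConnector");
            // BLOB/CLOB capture is opt-in as of this release; enable it explicitly if needed
            props.setProperty("lob.enabled", "true");
            return props;
        }
    }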

    It’s my pleasure to announce the release of Debezium 1.6.0.Beta2!

    This release adds support for Pravega to Debezium Server, expands the snapshotting options of the Debezium Oracle connector, and provides a range of bug fixes and other improvements across different Debezium connectors.

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release, along with a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium Signaling Table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle LogMiner adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations
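    To illustrate the second item, the renaming uses a field:newname notation in the SMT configuration. A minimal, connector-agnostic sketch (the renamed field and header names are examples only):

    import java.util.Properties;

    public class UnwrapSmtRenaming {
        public static Properties smtProperties() {
            Properties props = new Properties();
            props.setProperty("transforms", "unwrap");
            props.setProperty("transforms.unwrap.type",
                    "io.debezium.transforms.ExtractNewRecordState");
            // add op and source.ts_ms to the flattened value, renaming them along the way
            props.setProperty("transforms.unwrap.add.fields",
                    "op:operation,source.ts_ms:event_timestamp");
            // copy the source database name into a message header named db_name
            props.setProperty("transforms.unwrap.add.headers", "source.db:db_name");
            return props;
        }
    }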

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs were fixed related to the Debezium connector for Oracle LogMiner adapter thanks to the continued feedback by the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    I’m very happy to announce the release of Debezium 1.3.0.CR1!

    As we approach the final stretch of Debezium 1.3 Final, we took this opportunity to add delegate converter support for the ByteBufferConverter and introduce a debezium-scripting module. In addition, there’s also a range of bug fixes and quite a bit of documentation polish; overall, not less than 15 issues have been resolved for this release.

    I’m very happy to announce the release of Debezium 1.3.0.Beta2!

    In this release we’ve improved support for column filtering for the MySQL and SQL Server connectors, and there’s a brand-new implementation for ingesting change events from Oracle, using the LogMiner package. As we’re on the home stretch towards Debezium 1.3 Final, there’s also a wide range of smaller improvements, bug fixes and documentation clarifications; overall, not less than 44 issues have been resolved for this release.

    It’s my pleasure to announce the release of Debezium 1.3.0.Beta1!

    This release upgrades to the recently released Apache Kafka version 2.6.0, fixes several critical bugs and comes with a renaming of the connector configuration options for selecting the tables to be captured. We’ve also released Debezium 1.2.2.Final, which is a drop-in replacement for all users of earlier 1.2.x releases.

    I’m excited to announce the release of Debezium 1.3.0.Alpha1!

    This initial pass in the 1.3 release line provides a number of useful new features:

    • A new Debezium Server sink adapter for Azure Event Hubs

    • A new SQL Server connector snapshot mode, initial_only

    • Additional connection timeout options for the MongoDB Connector

    Overall, the community fixed not less than 31 issues for this release. Let’s take a closer look at some of them in the remainder of this post.

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode, which should preferably be used; for Debezium 1.3 we’re planning for this mode to be the basis for all the existing snapshotting modes (see the configuration sketch after this list)

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)
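    As promised above, here is a minimal configuration sketch for the exported snapshotting mode (property names as used by the 1.2.x Postgres connector; connection settings omitted):

    import java.util.Properties;

    public class ExportedSnapshotMode {
        public static Properties snapshotProperties() {
            Properties props = new Properties();
            props.setProperty("connector.class",
                    "io.debezium.connector.postgresql.PostgresConnector");
            // use an exported snapshot so the transition from snapshotting to
            // streaming from the WAL is lossless for concurrent transactions
            props.setProperty("snapshot.mode", "exported");
            return props;
        }
    }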

    I’m very happy to announce the release of Debezium 1.2.0.Final!

    Over the last three months, the community has resolved nearly 200 issues. Key features of this release include:

    • New Kafka Connect single message transforms (SMTs) for content-based event routing and filtering; Upgrade to Apache Kafka 2.5

    • Schema change topics for the Debezium connectors for SQL Server, Db2 and Oracle

    • Support for SMTs and message converters in the Debezium embedded engine

    • Debezium Server, a brand-new runtime which allows propagating data change events to a range of messaging infrastructures like Amazon Kinesis, Google Cloud Pub/Sub, and Apache Pulsar

    • A new column masking mode "consistent hashing", allowing column values to be anonymized while still keeping them correlatable

    • New metrics for the MongoDB connector

    • Improved re-connect capability for the SQL Server connector

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    Core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    With great happiness I’m announcing the release of Debezium 1.2.0.Beta1!

    This release brings user-facing schema change topics for the SQL Server, Db2 and Oracle connectors, a new message transformation for content-based change event routing, support for a range of array column types in Postgres and much more. We also upgraded the Debezium container images for Apache Kafka and Kafka Connect to version 2.5.0.

    As it’s the answer to all questions in life, the number of issues fixed for this release is exactly 42!

    I’m very happy to announce the release of Debezium 1.2.0.Alpha1!

    This first drop of the 1.2 release line provides a number of useful new features:

    • Support for message transformations (SMTs) and converters in the Debezium embedded engine API (see the sketch after this list)

    • A new SMT for filtering out change events using scripting languages

    • Automatic reconnects for the SQL Server connector

    • A new column masking mode using consistent hash values

    Overall, the community fixed not less than 41 issues for this release. Let’s take a closer look at some of them in the remainder of this post.
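    To make the first two items in the list above a bit more concrete, here is a minimal sketch of wiring the scripting-based filter SMT into the embedded engine API. The connector choice, connection settings, and filter condition are examples only, and the debezium-scripting module plus a JSR-223 Groovy engine are assumed to be on the classpath:

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class EmbeddedEngineWithFilter {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("name", "engine");
            props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            // database connection, offset storage and history settings omitted for brevity
            // SMTs use the same transforms.* syntax known from Kafka Connect:
            props.setProperty("transforms", "filter");
            props.setProperty("transforms.filter.type", "io.debezium.transforms.Filter");
            props.setProperty("transforms.filter.language", "jsr223.groovy");
            props.setProperty("transforms.filter.condition", "value.op == 'c'"); // only pass inserts

            DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(record -> System.out.println(record.value()))
                    .build();

            ExecutorService executor = Executors.newSingleThreadExecutor();
            executor.execute(engine);
            // run for a while, then shut down
            TimeUnit.MINUTES.sleep(1);
            engine.close();
            executor.shutdown();
        }
    }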

    It’s with great excitement that I’m announcing the release of Debezium 1.1.0.Final!

    About three months after the 1.0 release, this new version comes with many exciting new features such as:

    It’s my pleasure to announce the release of Debezium 1.1.0.CR1!

    This release brings a brand-new API module, including a facility for overriding the schema and value conversion of specific columns. The Postgres connector gained the ability to reconnect to the database after a connection loss, and the MongoDB connector supports the metrics known from other connectors now.

    Release early, release often! After the 1.1 Beta1 and 1.0.1 Final releases earlier this week, I’m today happy to share the news about the release of Debezium 1.1.0.Beta2!

    The main addition in Beta2 is support for integration tests of your change data capture (CDC) set-up using Testcontainers. In addition, the Quarkus extension for implementing the outbox pattern as well as the SMT for extracting the after state of change events have been re-worked and offer more configuration flexibility now.

    Did you know January 16th is National Nothing Day? It’s the one day in the year without celebrating, observing or honoring anything.

    Well, normally, that is. Because we couldn’t stop ourselves from sharing the news of the Debezium 1.1.0.Alpha1 release with you! It’s the first release after Debezium 1.0, and there are some really useful features coming with it. Let’s take a closer look.

    Today it’s my great pleasure to announce the availability of Debezium 1.0.0.Final!

    Since the initial commit in November 2015, the Debezium community has worked tirelessly to realize the vision of building a comprehensive open-source low-latency platform for change data capture (CDC) for a variety of databases.

    Within those four years, Debezium’s feature set has grown tremendously: stable, highly configurable CDC connectors for MySQL, Postgres, MongoDB and SQL Server, incubating connectors for Apache Cassandra and Oracle, facilities for transforming and routing change data events, support for design patterns such as the outbox pattern and much more. A very active and welcoming community of users, contributors and committers has formed around the project. Debezium is deployed to production at lots of organizations from all kinds of industries, some with huge installations, using hundreds of connectors to stream data changes out of thousands of databases.

    The 1.0 release marks an important milestone for the project: based on all the production feedback we got from the users of the 0.x versions, we figured it’s about time to express the maturity of the four stable connectors in the version number, too.

    Did you know December 12th is National Ding-a-Ling Day? It’s the day to call old friends you haven’t heard from in a while. So we thought we’d get in touch (not that it has been that long) with our friends, i.e. you, and share the news about the release of Debezium 1.0.0.CR1!

    It’s the first, and ideally only, candidate release; so Debezium 1.0 should be out very soon. Quite a few nice features found their way into CR1:

    While fall weather is in full swing, the Debezium community is not letting the unusually low, frigid temperatures get the best of us. It is my pleasure to announce the release of Debezium 1.0.0.Beta3!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Built against Kafka Connect 2.3.1 (DBZ-1612)

    • Renamed drop_on_stop configuration parameter to drop.on.stop (DBZ-1595)

    • Standardized source information for Cassandra connector (DBZ-1408)

    • Propagate MongoDB replicator exceptions so they are visible from Kafka Connect’s status endpoint (DBZ-1583)

    • Envelope methods should accept Instant rather than long values for timestamps (DBZ-1607)

    • Erroneously reporting no tables captured (DBZ-1519)

    • Avoid Oracle connector attempting to analyze tables (DBZ-1569)

    • Toasted columns should contain null in before rather than __debezium_unavailable_value (DBZ-1570)

    • Support PostgreSQL 11+ TRUNCATE operations using pgoutput decoder (DBZ-1576)

    • PostgreSQL connector times out in schema discovery for databases with many tables (DBZ-1579)

    • Value of ts_ms is not correct during snapshot processing (DBZ-1588)

    • Heartbeats are not generated for non-whitelisted tables (DBZ-1592)

    It is my pleasure to announce the release of Debezium 1.0.0.Beta2!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • Support PostgreSQL LTREE columns with a logical data type (DBZ-1336)

    • Support for PostgreSQL 12 (DBZ-1542)

    • Validate that the configured PostgreSQL replication slot name contains no invalid characters (DBZ-1525)

    • Add MySQL DDL parser support for index creation VISIBLE and INVISIBLE keywords (DBZ-1534)

    • Add MySQL DDL parser support for granting SESSION_VARIABLES_ADMIN (DBZ-1535)

    • Fix MongoDB collection source struct field when collection name contains a dot (DBZ-1563)

    • Close idle transactions after performing a PostgreSQL snapshot (DBZ-1564)

    History is in the making as Debezium begins to sprint to its 1.0 milestone. It’s my pleasure to announce the release of Debezium 1.0.0.Beta1!

    This new Debezium release includes several notable new features, enhancements, and fixes:

    • ExtractNewDocumentState and EventRouter SMTs propagate heartbeat & schema change messages (DBZ-1513)

    • Provides an alternative mapping for INTERVAL columns via interval.handling.mode (DBZ-1498); see the configuration sketch after this list

    • Ensure message keys have the right column order (DBZ-1507)

    • Warn of table locking problems in connector logs (DBZ-1280)
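    As referenced in the second item above, switching the INTERVAL mapping is a single connector option. A minimal sketch (Postgres connector assumed, per DBZ-1498; other settings omitted):

    import java.util.Properties;

    public class IntervalHandlingMode {
        public static Properties intervalProperties() {
            Properties props = new Properties();
            props.setProperty("connector.class",
                    "io.debezium.connector.postgresql.PostgresConnector");
            // emit INTERVAL columns as ISO-8601 strings instead of the default numeric mapping
            props.setProperty("interval.handling.mode", "string");
            return props;
        }
    }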

    On behalf of the Debezium community it’s my great pleasure to announce the release of Debezium 0.10.0.Final!

    As you’d expect, there were not many changes since last week’s CR2, one exception being a performance fix for the pgoutput plug-in of the Postgres connector, which may have suffered from slow processing when dealing with many small transactions in a short period of time (DBZ-1515).

    This release finalizes the work of overall eight preview releases. We have discussed the new features and changes in depth in earlier announcements, but here are some highlights of Debezium 0.10:

    I’m very happy to announce the release of Debezium 0.10.0.CR2!

    After the CR1 release we decided to do another candidate release, as not only was there a good number of bug fixes coming in, but the community also provided a few very useful feature implementations which we didn’t want to delay. So we adjusted the original plan a bit and now aim for Debezium 0.10 Final in the course of next week, barring any unforeseen regressions.

    As usual, let’s take a closer look at some of the new features and resolved bugs.

    The Debezium community is on the homestretch towards the 0.10 release and we’re happy to announce the availability of Debezium 0.10.0.CR1!

    Besides a number of bugfixes to the different connectors, this release also brings a substantial improvement to the way initial snapshots can be done with Postgres. Unless any major regressions show up, the final 0.10 release should follow very soon.

    The temperatures are slowly cooling off after the biggest summer heat, and the Debezium community is happy to announce the release of Debezium 0.10.0.Beta4. In this release we’re happy to share some news we don’t get to share too often: with Apache Cassandra, another database gets added to the list of databases supported by Debezium!

    In addition, we finished our efforts for rebasing the existing Postgres connector to the Debezium framework structure established for the SQL Server and Oracle connectors. This means more shared code between these connectors, and in turn reduced maintenance efforts for the development team going forward; but there’s one immediately tangible advantage for you coming with this, too: the Postgres connector now exposes the same metrics you already know from the other connectors.

    Finally, the new release contains a range of bugfixes and other useful improvements. Let’s explore some details below.

    The summer is at its peak, but the Debezium community is not relenting in its efforts, and so Debezium 0.10.0.Beta3 has been released.

    This version not only continues the incremental improvement of Debezium but also brings shiny new features.

    All of you who are using PostgreSQL 10 and higher as a service offered by different cloud providers have definitely felt the complications of needing to deploy the logical decoding plug-in necessary to enable streaming. This is no longer necessary: Debezium now supports the pgoutput replication protocol (DBZ-766), which is available out of the box since PostgreSQL 10.
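    In practical terms, that means the connector can be pointed at the plug-in that ships with the database itself. A minimal sketch with 0.10-era property names (connection values are examples only):

    import java.util.Properties;

    public class PgOutputConnectorConfig {
        public static Properties connectorProperties() {
            Properties props = new Properties();
            props.setProperty("connector.class",
                    "io.debezium.connector.postgresql.PostgresConnector");
            // use the logical decoding plug-in built into PostgreSQL 10+,
            // no server-side plug-in installation required
            props.setProperty("plugin.name", "pgoutput");
            props.setProperty("database.hostname", "localhost");
            props.setProperty("database.port", "5432");
            props.setProperty("database.user", "postgres");
            props.setProperty("database.password", "postgres");
            props.setProperty("database.dbname", "inventory");
            props.setProperty("database.server.name", "dbserver1"); // logical server name / topic prefix
            return props;
        }
    }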

    It’s my pleasure to announce the release of Debezium 0.10.0.Beta2!

    This further stabilizes the 0.10 release line, with lots of bug fixes to the different connectors. 23 issues were fixed for this release; a couple of those relate to the DDL parser of the MySQL connector, e.g. around RENAME INDEX (DBZ-1329), SET NEW in triggers (DBZ-1331) and function definitions with the COLLATE keyword (DBZ-1332).

    For the Postgres connector we fixed a potential inconsistency when flushing processed LSNs to the database (DBZ-1347). Also the "include.unknown.datatypes" option works as expected now during snapshotting (DBZ-1335) and the connector won’t stumble upon materialized views during snapshotting any longer (DBZ-1345).

    Another week, another Debezium release — I’m happy to announce the release of Debezium 0.10.0.Beta1!

    Besides the upgrade to Apache Kafka 2.2.1 (DBZ-1316), this mostly fixes some bugs, including a regression to the MongoDB connector introduced in the Alpha2 release (DBZ-1317).

    A very welcome usability improvement is that the connectors will now log a warning if not at least one table is actually captured as per the whitelist/blacklist configuration (DBZ-1242). This helps to prevent the accidental exclusion of all tables by means of an incorrect filter expression, in which case the connectors "work as intended", but no events are propagated to the message broker.
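    The most common way to hit that warning is a filter entry that omits the database prefix. A minimal sketch of a 0.10-era MySQL filter configuration that does match tables (names are examples only):

    import java.util.Properties;

    public class TableFilterConfig {
        public static Properties filterProperties() {
            Properties props = new Properties();
            // 0.10-era option names; entries are fully qualified, so "customers"
            // alone would match nothing and trigger the new warning
            props.setProperty("database.whitelist", "inventory");
            props.setProperty("table.whitelist", "inventory.customers,inventory.orders");
            return props;
        }
    }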

    Please see the release notes for the complete list of issues fixed in this release. Also make sure to examine the upgrade guidelines for 0.10.0.Alpha1 and Alpha2 when upgrading from earlier versions.

    Many thanks to community members Cheng Pan and Ching Tsai for their contributions to this release!

    Release early, release often — Less than a week since the Alpha1 we are announcing the release of Debezium 0.10.0.Alpha2!

    This is an incremental release that completes some of the tasks started in the Alpha1 release and provides a few bugfixes and also quality improvements in our Docker images.

    The change in the logic of the snapshot field has been delivered (DBZ-1295) as outlined in the last announcement. All connectors now provide information about which record is the last one in the snapshot phase so that downstream consumers can react to this.

    I’m very happy to announce the release of Debezium 0.10.0.Alpha1!

    The major theme for Debezium 0.10 will be to do some clean-up (that’s what you do at this time of the year, right?); we’ve planned to remove a few deprecated features and to streamline some details in the structure of the CDC events produced by the different Debezium connectors.

    This means that upgrading to Debezium 0.10 from earlier versions might take a bit more planning and consideration compared to earlier upgrades, depending on your usage of features and options already marked as deprecated in 0.9 and before. But no worries, we’re describing all changes in great detail in this blog post and the release notes.

    It’s my pleasure to announce the release of Debezium 0.9.5.Final!

    This is a recommended update for all users of earlier versions; besides bug fixes, it also provides a few new features. The release contains 18 resolved issues overall.

    The Debezium team is happy to announce the release of Debezium 0.9.3.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions, but there are a few significant new features too. Overall, 17 issues were resolved.

    Container images will be released with a small delay due to some Docker Hub configuration issues.

    The Debezium team is happy to announce the release of Debezium 0.9.2.Final!

    This is mostly a bug-fix release and a drop-in replacement for earlier Debezium 0.9.x versions. Overall, 18 issues were resolved.

    A couple of fixes relate to the Debezium Postgres connector:

    Quickly following up to last week’s release of Debezium 0.9, it’s my pleasure today to announce the release of Debezium 0.9.1.Final!

    This release fixes a couple of bugs which were reported after the 0.9 release. Most importantly, there are two fixes to the new Debezium connector for SQL Server, which deal with correct handling of LSNs after connector restarts (DBZ-1128, DBZ-1131). The connector also uses more reasonable defaults for the selectMethod and fetchSize options of the SQL Server JDBC driver (DBZ-1065), which can help to significantly increase throughput and reduce memory consumption of the connector.

    The MySQL connector supports GENERATED columns now with the new Antlr-based DDL parser (DBZ-1123), and for the Postgres connector the handling of primary key column definition changes was improved (DBZ-997).

    I’m delighted to announce the release of Debezium 0.9 Final!

    This release only adds a small number of changes since last week’s CR1 release; most prominently there are some more metrics for the SQL Server connector (lag behind master, number of transactions etc.) and two bug fixes related to the handling of partitioned tables in MySQL (DBZ-1113) and Postgres (DBZ-1118).

    Having been in the works for six months after the initial Alpha release, Debezium 0.9 comes with a brand new connector for SQL Server, lots of new features and improvements for the existing connectors, updates to the latest versions of Apache Kafka and the supported databases as well as a wide range of bug fixes.

    Reaching the home stretch towards Debezium 0.9, it’s with great pleasure that I’m announcing the first release of Debezium in 2019, 0.9.0.CR1!

    For this release we’ve mainly focused on sorting out remaining issues in the Debezium connector for SQL Server; the connector comes with greatly improved performance and has received a fair number of bug fixes.

    Other changes include a new interface for event handlers of Debezium’s embedded engine, which allows for bulk handling of change events, an option to export the scale of numeric columns as schema parameter, as well as a wide range of bug fixes for the Debezium connectors for MySQL, Postgres and Oracle.

    With only a few days left for the year, it’s about time for another Debezium release; so it’s with great pleasure that I’m announcing Debezium 0.9.0.Beta2!

    This release comes with support for MySQL 8 and Oracle 11g; it includes a first cut of metrics for monitoring the SQL Server and Oracle connectors, several improvements to the MongoDB event flattening SMT as well as a wide range of bug fixes. Overall, no less than 42 issues were addressed; very clearly, there has to be some deeper sense in that ;)

    A big shout-out goes to the following members of Debezium’s amazing community, who contributed to this release: Eero Koplimets, Grzegorz Kołakowski, Hanlin Liu, Lao Mei, Renato Mefi, Tautvydas Januskevicius, Wout Scheepers and Zheng Wang!

    In the following, let’s take a closer look at some of the changes coming with the 0.9 Beta2 release.

    It’s my pleasure to announce the release of Debezium 0.9.0.Beta1! Oh, and to those of you who are celebrating it — Happy Thanksgiving!

    This new Debezium release comes with several great improvements to our work-in-progress SQL Server connector:

    • Initial snapshots can be done using the snapshot isolation level if enabled in the DB (DBZ-941)

    • Changes to the structures of captured tables after the connector has been set up are supported now (DBZ-812)

    • New connector option decimal.handling.mode (DBZ-953) and pass-through of any database.* option to the JDBC driver (DBZ-964); see the configuration sketch below
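
    As a rough sketch of how these new options could be combined (a hypothetical excerpt; connection details are placeholders, and database.applicationName merely illustrates the database.* pass-through to the JDBC driver):

    # Hypothetical SQL Server connector configuration excerpt
    connector.class=io.debezium.connector.sqlserver.SqlServerConnector
    database.hostname=sqlserver.example.com
    database.port=1433
    database.user=debezium
    database.password=secret
    database.dbname=testDB
    database.server.name=server1
    # DBZ-953: control how DECIMAL/NUMERIC values are emitted
    decimal.handling.mode=double
    # DBZ-964: any database.* property is passed through to the JDBC driver
    database.applicationName=debezium-cdc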

    It’s my pleasure to announce the release of Debezium 0.9.0.Alpha2!

    While the work on the connectors for SQL Server and Oracle continues, we decided to do another Alpha release, as lots of fixes and new features - many of them contributed by community members - have piled up, which we wanted to get into your hands as quickly as possible.

    This release supports Apache Kafka 2.0, comes with support for Postgres' HSTORE column type, allows renaming and filtering of fields from change data messages for MongoDB, and contains multiple bug fixes and performance improvements. Overall, this release contains 55 fixes (note that a few of these have been merged back to 0.8.x and are contained in earlier 0.8 releases, too).

    A big "Thank You" is in order to community members Andrey Pustovetov, Artiship Artiship, Cliff Wheadon, Deepak Barr, Ian Axelrod, Liu Hanlin, Maciej Bryński, Ori Popowski, Peng Lyu, Philip Sanetra, Sagar Rao and Syed Muhammad Sufyian for their contributions to this release. We salute you!

    Just two weeks after the Debezium 0.8 release, I’m very happy to announce the release of Debezium 0.9.0.Alpha1!

    The main feature of the new version is a first work-in-progress version of the long-awaited Debezium connector for MS SQL Server. Based on the CDC functionality available in the Enterprise and Standard editions, the new connector lets you stream data changes out of Microsoft’s popular RDBMS.

    Besides that we’ve continued the work on the Debezium Oracle connector. Most notably, it supports initial snapshots of captured tables now. We’ve also upgraded Apache Kafka in our Docker images to 1.1.1 (DBZ-829).

    Please take a look at the change log for the complete list of changes in 0.9.0.Alpha1 and general upgrade notes.

    Note: At the time of writing (2018-07-26), the release artifacts (connector archives) are available on Maven Central. We’ll upload the Docker images for 0.9.0.Alpha1 to Docker Hub as soon as possible. Update: the Docker images have been uploaded and are ready for use under the tags 0.9.0.Alpha1 and the rolling 0.9 tag.

    \ No newline at end of file diff --git a/tag/tensorflow/index.html b/tag/tensorflow/index.html index d79a72a0f5..f632187ad3 100644 --- a/tag/tensorflow/index.html +++ b/tag/tensorflow/index.html @@ -1 +1 @@ - Tag: tensorflow

    Debezium Blog

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    \ No newline at end of file + Tag: tensorflow

    Debezium Blog

    With the recent success of ChatGPT, we can observe another wave of interest in the AI field and machine learning in general. The previous wave of interest in this field was, at least to a certain extent, caused by the fact that excellent ML frameworks like TensorFlow, PyTorch or general data processing frameworks like Spark became available and made the writing of ML models much more straightforward. Since that time, these frameworks have matured, and writing models is even more accessible, as you will see later in this blog. However, data set preparation and gathering data from various sources can sometimes take time and effort. Creating a complete pipeline that would pull existing or newly created data, adjust it, and ingest it into selected ML libraries can be challenging. Let’s investigate if Debezium can help with this task and explore how we can leverage Debezium’s capabilities to make it easier.

    \ No newline at end of file diff --git a/tag/testcontainers/index.html b/tag/testcontainers/index.html index 4768234387..2f89364a0a 100644 --- a/tag/testcontainers/index.html +++ b/tag/testcontainers/index.html @@ -1 +1 @@ - Tag: testcontainers

    Debezium Blog

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode. This mode should preferably be used, and for Debezium 1.3 we’re planning for it to be the basis for all the existing snapshotting modes

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813).

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    Setting up change data capture (CDC) pipelines with Debezium is typically a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

    There are two main components involved whose configuration needs consideration:

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.

    \ No newline at end of file + Tag: testcontainers

    Debezium Blog

    I am happy to announce the release of Debezium 1.2.1.Final!

    This release includes several bug fixes to different Debezium connectors, and we highly recommend the upgrade from 1.2.0.Final and earlier versions:

    • The Debezium Postgres connector may have missed events from concurrent transactions when transitioning from snapshotting to streaming events from the WAL (DBZ-2288); this is now fixed when using the exported snapshotting mode. This mode should preferably be used, and for Debezium 1.3 we’re planning for it to be the basis for all the existing snapshotting modes (see the configuration sketch after this list)

    • The Postgres JDBC driver got upgraded to 42.2.14 (DBZ-2317), which fixes a CVE in the driver related to processing XML column values sourced from untrusted XML input

    • The Debezium MySQL connector supports MariaDB’s ALTER TABLE statements with IF EXISTS (DBZ-2219); it also handles single-dimension DECIMAL columns in CAST expressions (DBZ-2305)

    • The MySQL connector automatically filters out specific DML binlog entries from internal tables when using it with Amazon RDS (DBZ-2275)

    • The Debezium MongoDB connector got more resilient against connection losses (DBZ-2141)
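
    As a minimal sketch of enabling the exported snapshot mode mentioned above on the Postgres connector (connection details and filters are placeholders):

    # Hypothetical Postgres connector excerpt using the exported snapshot mode
    connector.class=io.debezium.connector.postgresql.PostgresConnector
    database.hostname=postgres.example.com
    database.port=5432
    database.user=debezium
    database.password=secret
    database.dbname=inventory
    database.server.name=dbserver1
    # DBZ-2288: avoids missing events when switching from snapshot to streaming
    snapshot.mode=exported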

    It’s my pleasure to announce the release of Debezium 1.2.0.CR1!

    This release includes several notable features, enhancements, and fixes:

    • PostgreSQL can restrict the set of tables with a publication while using pgoutput (DBZ-1813); see the configuration sketch below.

    • Metrics MBean registration is skipped if a platform MBean server does not exist (DBZ-2089).

    • SQL Server reconnection improved during shutdown and connection resets (DBZ-2106).

    • EventRouter SMT can now pass non-String based keys (DBZ-2152).

    • PostgreSQL include.unknown.datatypes can now return strings rather than hashes (DBZ-1266).

    • Debezium Server now supports Google Cloud PubSub (DBZ-2092).

    • Debezium Server now supports Apache Pulsar sink (DBZ-2112).

    You can find the complete list of addressed issues, upgrade procedures, and notes on any backward compatibility changes in the release notes.
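
    For the publication-related change (DBZ-1813), a hedged configuration sketch could look as follows, assuming the filtered auto-create mode and placeholder table names:

    # Hypothetical Postgres connector excerpt restricting the publication (pgoutput)
    plugin.name=pgoutput
    publication.name=dbz_publication
    # Create the publication only for the captured tables instead of FOR ALL TABLES
    publication.autocreate.mode=filtered
    # the filter property was still called table.whitelist in older Debezium versions
    table.include.list=public.customers,public.orders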

    I’m very happy to share the news that Debezium 1.2.0.Beta2 has been released!

    The core feature of this release is Debezium Server, a dedicated stand-alone runtime for Debezium, opening up its open-source change data capture capabilities towards messaging infrastructure like Amazon Kinesis.

    Overall, the community has fixed 25 issues since the Beta1 release, some of which we’re going to explore in more depth in the remainder of this post.

    Setting up change data capture (CDC) pipelines with Debezium is typically a matter of configuration, without any programming being involved. It’s still a very good idea to have automated tests for your CDC set-up, making sure that everything is configured correctly and that your Debezium connectors are set up as intended.

    There are two main components involved whose configuration needs consideration (a configuration sketch follows the list):

    • The source database: it must be set up so that Debezium can connect to it and retrieve change events; details depend on the specific database, e.g. for MySQL the binlog must be in "row" mode, for Postgres, one of the supported logical decoding plug-ins must be installed, etc.

    • The Debezium connector: it must be configured using the right database host and credentials, possibly using SSL, applying table and column filters, potentially one or more single message transformations (SMTs), etc.
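
    To make this concrete, here is a hedged sketch of the kind of configuration such a test would exercise, using a MySQL connector with placeholder hosts, credentials, and filters; an automated test (e.g. with Testcontainers) would then assert that change events arrive for exactly the configured tables:

    # Hypothetical connector configuration verified by a CDC integration test.
    # Prerequisite on the source MySQL server (not a connector property): binlog_format=ROW
    connector.class=io.debezium.connector.mysql.MySqlConnector
    database.hostname=mysql.test.local
    database.port=3306
    database.user=debezium
    database.password=dbz
    database.server.id=184054
    database.server.name=test-mysql
    database.history.kafka.bootstrap.servers=kafka.test.local:9092
    database.history.kafka.topic=schema-changes.inventory
    # filters and transformations are exactly the knobs worth covering in a test
    # (the filter property was still called table.whitelist in older Debezium versions)
    table.include.list=inventory.customers
    transforms=unwrap
    transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState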

    \ No newline at end of file diff --git a/tag/tests/index.html b/tag/tests/index.html index 959758d78f..2fc6998f1c 100644 --- a/tag/tests/index.html +++ b/tag/tests/index.html @@ -1 +1 @@ - Tag: tests

    Debezium Blog

    When developing the tests for your project, sooner or later you will probably get into the situation when some of the tests fail randomly. These tests, also known as flaky tests, are very unpleasant as you never know if the failure was random or there is a regression in your code. In the worst case you just ignore these tests because you know they are flaky. Most of the testing frameworks even have a dedicated annotation or other means to express that the test is flaky and if it fails, the failure should be ignored. The value of such a test is very questionable. The best thing you can do with such a test is of course to fix it so that it doesn’t fail randomly. That’s easy to say, but harder to do. The hardest part is usually to make the test fail in your development environment so that you can debug it and understand why it fails and what is the root cause of the failure. In this blog post I’ll try to show a few techniques which may help you to simulate random test failures on your local machine.

    \ No newline at end of file + Tag: tests

    Debezium Blog

    When developing the tests for your project, sooner or later you will probably get into the situation when some of the tests fail randomly. These tests, also known as flaky tests, are very unpleasant as you never know if the failure was random or there is a regression in your code. In the worst case you just ignore these tests because you know they are flaky. Most of the testing frameworks even have a dedicated annotation or other means to express that the test is flaky and if it fails, the failure should be ignored. The value of such a test is very questionable. The best thing you can do with such a test is of course to fix it so that it doesn’t fail randomly. That’s easy to say, but harder to do. The hardest part is usually to make the test fail in your development environment so that you can debug it and understand why it fails and what is the root cause of the failure. In this blog post I’ll try to show a few techniques which may help you to simulate random test failures on your local machine.

    \ No newline at end of file diff --git a/tag/time series/index.html b/tag/time series/index.html index eb94d6afaa..01d51b877d 100644 --- a/tag/time series/index.html +++ b/tag/time series/index.html @@ -1 +1 @@ - Tag: time series

    Debezium Blog

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    \ No newline at end of file + Tag: time series

    Debezium Blog

    This tutorial was originally published by QuestDB, where guest contributor, Yitaek Hwang, shows us how to stream data into QuestDB with change data capture via Debezium and Kafka Connect.

    \ No newline at end of file diff --git a/tag/timescaledb/index.html b/tag/timescaledb/index.html index fcd420bdfd..b8f2021cc0 100644 --- a/tag/timescaledb/index.html +++ b/tag/timescaledb/index.html @@ -1 +1 @@ - Tag: timescaledb

    Debezium Blog

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    \ No newline at end of file + Tag: timescaledb

    Debezium Blog

    In this article, we are going to present and demonstrate a new feature delivered in Debezium 2.4 - the integration with the TimescaleDB database.

    TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is implemented as an extension for the PostgreSQL database. This fact leads us to re-use the standard Debezium PostgreSQL connector and implement TimescaleDB support as a single message transform (SMT).

    \ No newline at end of file diff --git a/tag/topics/index.html b/tag/topics/index.html index 9da1944466..0bfa447160 100644 --- a/tag/topics/index.html +++ b/tag/topics/index.html @@ -7,4 +7,4 @@ num.partitions = 1 compression.type = producer log.cleanup.policy = delete -log.retention.ms = 604800000 ## 7 days

    But often, when you use Debezium and Kafka in a production environment you might choose to disable Kafka’s topic auto creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case you have to create topics for Debezium’s captured data sources upfront.
    But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, since KIP-158 has been implemented to enable customizable topic creation with Kafka Connect.

    \ No newline at end of file +log.retention.ms = 604800000 ## 7 days

    But often, when you use Debezium and Kafka in a production environment you might choose to disable Kafka’s topic auto creation capability with auto.create.topics.enable = false, or you want the connector topics to be configured differently from the default. In this case you have to create topics for Debezium’s captured data sources upfront.
    But there’s good news! Beginning with Kafka Connect version 2.6.0, this can be automated, since KIP-158 has been implemented to enable customizable topic creation with Kafka Connect.
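
    With that in place, topic settings can be declared right in the connector configuration; a hedged sketch using Kafka Connect’s topic.creation properties (the values shown are hypothetical):

    # Requires Kafka Connect 2.6+ with topic.creation.enable=true on the worker (the default)
    topic.creation.default.replication.factor=3
    topic.creation.default.partitions=10
    topic.creation.default.cleanup.policy=delete
    topic.creation.default.retention.ms=604800000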

    \ No newline at end of file diff --git a/tag/tracing/index.html b/tag/tracing/index.html index 52f228746d..334c252434 100644 --- a/tag/tracing/index.html +++ b/tag/tracing/index.html @@ -1 +1 @@ - Tag: tracing

    Debezium Blog

    The current pattern in application development gravitates toward microservices and microservices architecture. While this approach gives the developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

    \ No newline at end of file + Tag: tracing

    Debezium Blog

    The current pattern in application development gravitates toward microservices and microservices architecture. While this approach gives the developer teams great flexibility in terms of independent deployments and development velocity, the drawback becomes apparent when you try to track down a bug in production. Monolithic applications sit nicely in a single place so you can introspect the code flows and the application’s runtime state. This is more challenging with microservice architectures, as a single business transaction...

    \ No newline at end of file diff --git a/tag/transactions/index.html b/tag/transactions/index.html index 50e390e2d4..0cdaaa33bb 100644 --- a/tag/transactions/index.html +++ b/tag/transactions/index.html @@ -1 +1 @@ - Tag: transactions

    Debezium Blog

    Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far, Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event. However, in case of failures, restarts or DB connection drops, the same event can be delivered more than once. A typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered and at the same time there won’t be any duplicates, every single message will be delivered exactly once. So far our answer was that the users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out-of-the-box, only with a little configuration change.

    \ No newline at end of file + Tag: transactions

    Debezium Blog

    Every now and then there is a question in the Debezium chat or on the mailing list about how to ensure exactly-once delivery of the records produced by Debezium. So far, Debezium has aimed only for at-least-once delivery. This means Debezium guarantees every single change will be delivered and there is no missing or skipped change event. However, in case of failures, restarts or DB connection drops, the same event can be delivered more than once. A typical scenario is that the event is delivered twice - once before the failure/restart and a second time after it. Exactly-once delivery (or semantics) provides a stronger guarantee - every single message will be delivered and at the same time there won’t be any duplicates, every single message will be delivered exactly once. So far our answer was that the users have to implement their own deduplication system if they need exactly-once delivery. However, with Kafka Connect support for exactly-once delivery, it seems we can provide exactly-once delivery for Debezium connectors out-of-the-box, only with a little configuration change.
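
    A hedged sketch of that configuration change, based on Kafka Connect’s exactly-once support for source connectors (KIP-618); it assumes Kafka/Kafka Connect 3.3 or newer:

    # Kafka Connect worker (distributed mode) configuration
    exactly.once.source.support=enabled

    # Debezium connector configuration
    exactly.once.support=required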

    \ No newline at end of file diff --git a/tag/ui/index.html b/tag/ui/index.html index 6452a49a6f..b347d738aa 100644 --- a/tag/ui/index.html +++ b/tag/ui/index.html @@ -1 +1 @@ - Tag: ui

    Debezium Blog

    Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    \ No newline at end of file + Tag: ui

    Debezium Blog

    Hello everyone, Jakub here. You may have noticed that there wasn’t much happening around Debezium UI lately. This, however, would be only partially true. We owe you an explanation in this regard, so please bear with me. Let’s start with the status of the current UI project. It became increasingly clear that while a UI for Debezium is an important part of our vision, developing a UI strictly tied to Kafka Connect is not the right...

    \ No newline at end of file diff --git a/tag/vagrant/index.html b/tag/vagrant/index.html index ab39b5de3b..789e24496a 100644 --- a/tag/vagrant/index.html +++ b/tag/vagrant/index.html @@ -1 +1 @@ - Tag: vagrant

    Debezium Blog

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

    This is true for all connectors except the Debezium PostgreSQL connector. This connector is specific in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins:

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

    • wal2json, which is based on JSON and which is maintained by its own upstream community

    \ No newline at end of file + Tag: vagrant

    Debezium Blog

    The Debezium project strives to provide an easy deployment of connectors, so users can try and run connectors of their choice mostly by getting the right connector archive and unpacking it into the plug-in path of Kafka Connect.

    This is true for all connectors except the Debezium PostgreSQL connector. This connector is specific in that it requires a logical decoding plug-in to be installed inside the PostgreSQL source database(s) themselves. Currently, there are two supported logical decoding plug-ins (a configuration sketch follows the list):

    • postgres-decoderbufs, which uses Protocol Buffers as a very compact transport format and which is maintained by the Debezium community

    • wal2json, which is based on JSON and which is maintained by its own upstream community
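
    Whichever plug-in is installed on the database side has to match the connector’s plugin.name setting; a brief, hedged sketch with placeholder connection details:

    # Hypothetical Postgres connector excerpt
    # plugin.name must match the logical decoding plug-in installed in PostgreSQL
    # (decoderbufs or wal2json in the versions discussed here)
    plugin.name=decoderbufs
    database.hostname=postgres.example.com
    database.dbname=inventory
    slot.name=debezium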

    \ No newline at end of file diff --git a/tag/vitess/index.html b/tag/vitess/index.html index c8f5b50d25..d976f52d30 100644 --- a/tag/vitess/index.html +++ b/tag/vitess/index.html @@ -1 +1 @@ - Tag: vitess

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features, let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure to announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some new, cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work, Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This release is the second maintenance release that focuses on addressing several critical stability issues with the 2.6.1.Final release, support for Oracle database query filtering with more than one thousand tables, fixed race condition with PostgreSQL offset flushing, fixed Avro compatibility, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted packed with many new features. We’re pleased to announce the release of Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATs authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news, Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of new changes and improvements across various connectors like MongoDB, MariaDB, MySQL, Oracle, Vitess, and the Kubernetes Operator, to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, MongoDB silently stops gathering changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    As the team has sprung forward into action, with spring upon us, and we’re in the summer spirit, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, there were 249 issues resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and yes a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! Breaking changes The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable. Oracle In older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we are continuously looking at improvements and fixes that ensure older releases continue to provide the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, ReselectColumnsPostProcessor, uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements, batch support for the JDBC Sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many exciting new features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes addressing regressions and stability, as well as documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however there are some changes that are noteworthy that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 changes to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth. Breaking changes MongoDB The MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived, Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gives us a unique opportunity to ship Debezium 2.2 not only with tons of new features and bug fixes, but also with major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and the usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have recently noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree - the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in the Debezium engine (see the sketch after this list)

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module
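
    For the predicate support, the standard Kafka Connect predicate properties (KIP-585) can now also be handed to the Debezium engine; a hedged sketch with a hypothetical transform and topic pattern:

    # Hypothetical example of guarding an SMT with a predicate (KIP-585 style)
    transforms=unwrap
    transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
    transforms.unwrap.predicate=onlyInventory
    predicates=onlyInventory
    predicates.onlyInventory.type=org.apache.kafka.connect.transforms.predicates.TopicNameMatches
    # regular expression over topic names
    predicates.onlyInventory.pattern=dbserver1.inventory.*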

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production within lots of organizations from across multiple industries, using hundreds of connectors to stream data changes out of thousands of database platforms.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 53 issues that were fixed.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, this release contains a total of 107 issues that were fixed.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes for Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, 110 issues resolved in total. Just, WOW!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no fewer than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also, in the wider Debezium community, some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium signaling table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    \ No newline at end of file + Tag: vitess

    Debezium Blog

    Just in time for the holidays, it’s my pleasure to announce the fifth maintenance release for Debezium 3, 3.0.5.Final. This release introduces several minor improvements, bugfixes, and some breaking changes. Let’s take a few moments and dive into the details…​

    I am excited to announce the second maintenance release for the Debezium 3 release stream, 3.0.2.Final. This maintenance release introduces a number of features; let’s take a moment and dive into the highlights.

    I am pleased to announce the first maintenance release for the Debezium 3 release stream, 3.0.1.Final. This maintenance release introduces several new features including support for Cassandra 5, PostgreSQL 17, and MySQL 9.1. In addition, there are some configuration improvements with Debezium Server supporting YAML.

    It’s with immense joy and pleasure that we announce the availability of Debezium 3.0.0.Final!

    We released Debezium 2.0 nearly 2 years ago, and in that time, the platform has continued to grow, introducing sink-based connectors, new community-led connectors, and an extensive list of features and improvements to the core platform and connectors. With the community’s help, Debezium remains the de facto leader in CDC.

    The 3.0 release marks another milestone for Debezium, one that we’re eager to share.

    As I mentioned last week, we are in the homestretch for the next major release. We’re happy to announce the next candidate release, Debezium 3.0.0.CR2! Let’s take a quick look at the changes and improvements in this new release…​

    I am happy to announce the third maintenance release of the Debezium 2.7 release stream, 2.7.3.Final. This maintenance release focuses on addressing stability and regressions, with several improvements. Let’s dive into these changes…​

    The community is in the homestretch toward the next major milestone for Debezium, and we’re happy to announce the availability of Debezium 3.0.0.CR1!

    Beyond a number of bugfixes to connectors, this release also brings several improvements for MySQL, Oracle, and the Vitess connectors. Unless any major regressions show up, we can expect Debezium 3 in the not too distant future.

    I am happy to announce the second maintenance release of the Debezium 2.7 release stream, 2.7.2.Final. This maintenance release focuses on addressing stability and regressions, with some improvements such as support for DECIMAL(p) data types with Informix and fixing a regression with the JsonConverter and the TruncateColumn handler.

    Even as the summer heat continues to rise, the Debezium team has some cool news to share. We’re pleased to announce the first beta preview of Debezium 3, 3.0.0.Beta1.

    This release includes a host of new features and improvements, including detailed metrics for creates, updates, and deletes per table, replication slot creation timeout, support for PgVector data types with PostgreSQL, a new Oracle embedded buffer implementation based on Ehcache, and others. Let’s take a few moments and dive into these new features and how you can take advantage of them in Debezium 3!

    I am pleased to announce the first maintenance release of Debezium 2.7, 2.7.1.Final. This maintenance release focuses on addressing a number of stability issues, including improvements to ad-hoc snapshots, closing of transformations in the embedded engine, improvements to the Oracle LogMiner implementation, Vitess epoch calculations, and more…​

    Let’s dive into these changes…​

    As the summer temperatures continue to rise, the Debezium community is pleased to announce Debezium 3.0.0.Alpha2 is now available for testing.

    This release includes a host of new features and improvements, including being built on top of Kafka 3.8, the relocation of the JDBC sink connector, custom converters support in Debezium Server, and several improvements to our community-led connectors.

    We are happy to announce the first pre-release of Debezium 3, 3.0.0.Alpha1. This release, albeit smaller than our normal pre-releases, is highly focused on a few key points, such as testing the release process with Java 17/21; however, it also includes several new features. Let’s take a moment and talk about the upcoming breaking changes in-depth and the new features you will find.

    As the team leaps into Q3, we’re happy to announce the fruits of our Q2 work: Debezium 2.7.0.Final is now generally available. This release includes changes for 140 issues with contributions from over 51 contributors. Let’s take a moment and review all the changes.

    Although half of 2024 is nearly behind us, the team is pleased to announce the first beta preview release for Debezium, 2.7.0.Beta1.

    This release includes incubating support for Db2 on z/OS, authentication and encryption with NATS JetStream, improvements for the MariaDB JDBC sink dialect, JMX Exporter with Debezium Server images, configurable metrics in Debezium Operator, and more.

    Let’s walk through all the highlights and discuss these in more depth…​

    I’m pleased to announce the immediate availability of Debezium 2.6.2.Final. This second maintenance release addresses several critical stability issues with the 2.6.1.Final release and brings support for Oracle database query filtering with more than one thousand tables, a fix for a race condition with PostgreSQL offset flushing, an Avro compatibility fix, and more.

    Let’s take a few moments and dive into these and more…​

    The old saying is "April showers bring May flowers"; however, in this case it seems a new Debezium release has sprouted, packed with many new features. We’re pleased to announce that Debezium 2.7.0.Alpha2, the next pre-release in the Debezium 2.7 stream, is now available for testing.

    This release includes new ROW_ID serialization for the Oracle connector, PostgreSQL array support for the JDBC sink connector, NATS authentication with Debezium Server, performance improvements with Oracle LogMiner and large tables, and more. Let’s walk through the highlights of this release and discuss these and more in-depth…​

    As the temperature for summer continues to rise, I’m pleased to announce that Debezium has some really cool news: Debezium 2.7.0.Alpha1 is now available for testing. This release includes a variety of changes, from improvements across connectors like MongoDB, MariaDB, MySQL, Oracle, and Vitess and the Kubernetes Operator to a myriad of subtle fixes and improvements across the entire Debezium portfolio. Let’s take a moment and dive into some highlights…​

    I’m pleased to announce the immediate availability of Debezium 2.6.1.Final. This release is the first maintenance release that focuses on addressing several critical stability issues with the 2.6.0.Final release, including classpath loading problems with Debezium Server, the MongoDB connector silently ceasing to capture changes, and a race condition with the Oracle Infinispan buffer implementation.

    Let’s take a few moments and dive into these and more…​

    With spring upon us and the team having sprung into action, we are pleased to announce the immediate release of Debezium 2.6.0.Final. This release includes dozens of new features, bug fixes, and improvements from the valiant efforts of the team and community contributors. Overall, 249 issues were resolved with contributions from over 56 contributors. Let’s take a moment and review all the changes.

    As we are just a week away from Debezium 2.6.0.Final, I am pleased to announce Debezium 2.6.0.CR1, the first release candidate for the 2.6 release stream. This release includes a number of improvements, including XML support for the Oracle OpenLogReplicator adapter, TRACE level logging support for Debezium Server, configurable partition modes for Cassandra, the new Snapshot API for MongoDB and Db2, and more.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.6.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    We are happy to announce the third maintenance release of the Debezium 2.5 release stream, Debezium 2.5.3.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    We are pleased to announce the release of Debezium 2.6.0.Beta1. We enter the home stretch with this release, packed with many improvements, enhancements, bug fixes, and, yes, a brand new Db2 connector for iSeries. There is a lot to cover in this release, so let’s dive right in! The team aims to avoid any potential breaking changes between minor releases; however, such changes are sometimes inevitable: in older versions of Debezium, users...

    We are happy to announce the second maintenance release of the Debezium 2.5 release stream, Debezium 2.5.2.Final. This release includes some improvements and numerous bug fixes, so let’s dive right in…​

    As we’ve hit the mid-mark of the quarter, the team is pleased to announce the second installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha2. This release is filled to the brim with new features, improvements, and bug fixes, so let’s dive into these…​

    While we remain on track with the upcoming Debezium 2.6 release, we continuously look at improvements and fixes to ensure that older releases keep providing the best possible experience. With that, the team is pleased to announce the first maintenance release of Debezium 2.5, Debezium 2.5.1.Final. Let’s dive into what this release includes and what you should be aware of while upgrading…​

    A new year, a new preview release, in true Debezium fashion. The team is pleased to announce the first installment of the Debezium 2.6 release stream, Debezium 2.6.0.Alpha1. Let’s take a moment and dive into these new features and understand how to use them to improve your change data capture experience…​

    As the winter chill settles in, and we transition to the festive holiday season, our dedicated team has been busy preparing a special gift for the Debezium community. I am excited to share the immediate release of Debezium 2.5.0.Final, just in time for the holiday celebrations. Get ready to unwrap this latest minor version, filled with holiday cheer and exciting new features!

    Let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.5, which includes 181 issues resolved by 37 unique contributors.

    As we are just one step away from the Debezium 2.5 final release, I am pleased to announce that Debezium 2.5.0.CR1 is now available. This release includes a number of improvements like an AWS SQS sink for Debezium Server, INSERT/DELETE semantics for incremental snapshot watermarking, the ReselectColumnsPostProcessor, and uniform Oracle LOB behavior.

    Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.CR1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As we begin to approach the final stretch for Debezium 2.5, I am pleased to announce that Debezium 2.5.0.Beta1 is now available. This release includes a number of improvements like support for MariaDB GTID, partitioning for Debezium Server EventHub’s sink, native RabbitMQ streams Debezium Server sink, streaming from PostgreSQL 16 stand-by databases, MySQL high-precision source timestamps, field inclusion/exclusion with JDBC sink, some additional notifications for initial snapshots, and service account support for Debezium Operator CRDs. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at all these changes and improvements included in Debezium 2.5.0.Beta1; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    As the year starts to come to a close, I am happy to announce the first maintenance release for the Debezium 2.4 release series, Debezium 2.4.1.Final.

    While our maintenance releases focus primarily on bug fixes and stability improvements, there are a few new features we backported from our most recent Debezium 2.5 development series. All these new features focus on the Debezium Operator for Kubernetes, so let’s take a quick deep dive into those details. As always, you can find the complete list of changes for this release in the release notes.

    While it has only been two short weeks since our first preview release for the Debezium 2.5 release stream, I am happy to announce the immediate availability of the next preview release, Debezium 2.5.0.Alpha2.

    This release includes a variety of improvements: batch support for the JDBC sink connector, seamless support for MongoDB documents that exceed the 16MB barrier, MySQL 8.2 compatibility, and signal improvements for SQL Server. Additionally, this release includes a variety of bug fixes and several breaking changes.

    Let’s take a closer look at these changes and improvements that are included in Debezium 2.5.0.Alpha2; as always, you can find the complete list of changes for this release in the release notes. Please remember to take special note of any breaking changes that could affect your upgrade path.

    It’s been about three weeks since we released Debezium 2.4, and in that time the team has been diligently working on what comes next in the evolution of Debezium. I am pleased to announce that today we have released Debezium 2.5.0.Alpha1, the first preview release of Debezium’s 2.5 release stream. This release includes many new exciting features as well as bug fixes, e.g. a brand-new IBM Informix connector, preview support for MariaDB with the...

    As the summer months wind down and we enter autumn with cooler temperatures, the team has diligently prepared the next major milestone of Debezium. It’s my pleasure to announce the immediate release of the next minor version, Debezium 2.4.0.Final.

    As the team begins the journey toward the next development iteration, let’s take a moment and review all the new features, changes, and improvements that are included in Debezium 2.4, which includes 231 issues resolved by 68 unique contributors.

    As the summer concludes for us in the north and we await the autumn colors, the team has been busy preparing for the next major release of Debezium 2.4. It’s my pleasure to announce today that we are nearly there with the release of Debezium 2.4.0.CR1. The focus for this release is primarily on stability; however, we do have a few new last-minute additions that we should highlight, so let’s dive right in, shall...

    It has been nearly two weeks since our last preview release of the Debezium 2.4 series, and I am thrilled to announce the next installment of that series, Debezium 2.4.0.Beta2.

    While beta releases typically focus on stability and bugs, this release includes quite a number of noteworthy improvements and new features, including a new ingestion method for Oracle using OpenLogReplicator, a new single message transform to handle timezone conversions, custom authentication support for MongoDB, configurable order for the MongoDB aggregation pipeline, and lastly support for MongoDB 7.

    Let’s take a few moments and dive into all these new features, improvements, and changes in more detail.

    It is my pleasure to announce the immediate release of Debezium 2.3.3.Final.

    This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    While development remains steadfast as we continue forward on Debezium 2.4, I am thrilled to announce the immediate availability of Debezium 2.4.0.Beta1.

    While this release focuses on stability and bug fixes, there are several new noteworthy features including TimescaleDB support, JMX notifications using JSON payloads, multiple improvements to the Oracle connector’s metrics and embedded Infinispan buffer implementation, SQL Server heartbeats, Vitess shardless strategy, JDBC sink with SQL Server identity-based inserts, and much more. Let’s dive into each of these new features and others in more detail.

    Despite summer being well underway, Debezium contributors remain hard at work, and it’s my pleasure to announce the next preview release of the Debezium 2.4 series, 2.4.0.Alpha2. This preview release includes a mix of improvements, bug fixes, and new features that are available for the Debezium community to test and offer feedback. Some highlights from this release include ad-hoc blocking snapshots, source-to-sink column name propagation, support for alternative MySQL drivers, and all Cassandra connectors with Debezium...

    It is my pleasure to announce the immediate release of Debezium 2.3.2.Final.

    This release includes several bug fixes to address regressions, stability issues, and documentation updates. If you are currently looking to upgrade to the Debezium 2.3.x release stream, we highly recommend you consider using this release. Let’s take a quick look into the regressions and bug fixes.

    It has been several weeks since we released the first installment of Debezium 2.3, and I’m excited to announce the next iteration of Debezium 2.3 with 2.3.1.Final. As with any micro-release, the focus is on stability and bug fixes, as well as adjustments to our documentation; however, there are some noteworthy changes that I would like to take a few moments to highlight.

    It’s been a busy month in Debezium-land, and it’s my pleasure to announce the first release of the Debezium 2.4 series, 2.4.0.Alpha1. This release includes a plethora of changes, 59 to be exact, that cover a wide range of resolved issues, improvements to stability, new features, and several breaking changes. Let’s dive into each of these and discuss them in more depth, starting with the breaking changes: the MongoDB connector explicitly...

    The team has been quite busy these last couple months preparing for a condensed release timeline for Debezium 2.3, and I am thrilled to announce that the next installment has arrived: Debezium 2.3.0.Final is now available! Despite a condensed release schedule, this release is packed with tons of new features and improvements. Debezium 2.3 includes a brand-new notification subsystem, a rewrite of the signal subsystem to support additional means to send signals to Debezium connectors,...

    It is my pleasure to announce the next Debezium 2.3 release, 2.3.0.CR1!

    The main focus of this release is to stabilize the Debezium 2.3 release in preparation for a final release in the coming weeks, which typically means we’re focusing on bugfixes; however, this release includes two new features. Let’s take a moment and dive into these new features and any bug fixes that are noteworthy!

    It’s my pleasure to announce the next release of the Debezium 2.3 series, 2.3.0.Beta1!

    While this release focuses primarily on bug fixes and stability improvements, there are some new improvements with the PostgreSQL connector and the new notification and channels subsystem. In addition, there are also some compatibility breaking changes.

    This release contains changes for 22 issues, so let’s take a moment and dive into the new features and any potential bug fixes or breaking changes that are noteworthy!

    It’s my pleasure to announce the first release of the Debezium 2.3 series, 2.3.0.Alpha1!

    This release brings many new and exciting features as well as bug fixes, including Debezium status notifications, storage of Debezium state into a JDBC data store, configurable signaling channels, the ability to edit connector configurations via Debezium UI, the parallelization of Vitess shards processing, and much more.

    This release contains changes for 59 issues, so let’s take a moment and dive into several of these new features and any potential bug fixes or breaking changes that are noteworthy!

    Today, it’s with great joy that we can announce the availability of Debezium 2.2.0.Final!

    As many of you may have noticed, this release cadence took a bit longer than our traditional three months. While we normally prefer to keep to our usual cadence, this shift gave us a unique opportunity to ship Debezium 2.2 with not only tons of new features and bug fixes, but also major upgrades to several core components.

    The Debezium team is excited to announce the first release candidate of Debezium 2.2, Debezium 2.2.0.CR1.

    This release primarily focuses on stability improvements and bug fixes; however, there are a number of new features and breaking changes. In this release, Debezium migrated to Quarkus 3.0.0.Final, there are performance improvements to the Debezium Server Pulsar sink, Jolokia can be enabled inside Debezium’s Kafka Connect container image, there is incubating support for incremental snapshots on MongoDB multi-replica and sharded clusters, and usage of Docker Hub for images has been deprecated.

    Let’s take a moment and dive into several of these and what they mean moving forward!

    The team is excited to announce the first beta release of the Debezium 2.2 release stream, Debezium 2.2.0.Beta1.

    This release includes a plethora of bug fixes, improvements, and a number of new features including, but not limited to, a new JDBC sink connector implementation, MongoDB sharded cluster improvements, Google Spanner PostgreSQL dialect support, and a RabbitMQ sink implementation for Debezium Server to just name a few.

    Let’s take a moment and dive into what’s new!

    Today, I am pleased to announce the third alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha3.

    This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, optional parallel snapshots, server-side MongoDB change stream filtering, surrogate keys for incremental snapshots, a new Cassandra connector for Cassandra Enterprise, and much more.

    Let’s take a moment and dive into some of these new features, improvements, and breaking changes.

    Today, I am pleased to announce the second alpha release in the 2.2 release stream, Debezium 2.2.0.Alpha2. This release includes a plethora of bug fixes, improvements, breaking changes, and a number of new features including, but not limited to, a new ExtractRecordChanges single message transformation, a Reactive-based implementation of the Debezium Outbox extension for Quarkus, a Debezium Storage module for Apache RocketMQ, and much more. Let’s take a moment and dive into these new features, improvements, and breaking changes.

    The Debezium release cadence is in full swing as I’m excited to announce Debezium 2.1.2.Final!

    This release focuses primarily on bug fixes and stability, and it is the recommended update for all users from earlier versions. This release contains 28 resolved issues, so let’s take a moment and discuss a critical breaking change.

    It’s my pleasure to announce not only the first release of the Debezium 2.2 series, but also the first release of Debezium in 2023, 2.2.0.Alpha1!

    The Debezium 2.2.0.Alpha1 release includes some breaking changes, a number of bug fixes, and some noteworthy improvements and features, including but not limited to:

    • [Breaking Change] - ZonedTimestamp values will no longer truncate fractional seconds.

    • [New] - Support ingesting changes from an Oracle logical stand-by database

    • [New] - Support Amazon S3 buckets using the Debezium Storage API

    • [New] - Support retrying database connections during connector start-up

    • [New] - Debezium Server sink connector support for Apache RocketMQ and Infinispan

    Today it’s my great pleasure to announce the availability of Debezium 2.1.0.Final!

    You might have noticed that Debezium went a bit silent for the last few weeks. No, we are not going away. In fact, the elves at Google worked furiously to bring you a present under the Christmas tree: the Debezium Spanner connector.

    It’s my pleasure to announce the first release of the Debezium 2.1 series, 2.1.0.Alpha1!

    The Debezium 2.1.0.Alpha1 release includes quite a number of bug fixes but also some noteworthy improvements and new features including but not limited to:

    • Support for PostgreSQL 15

    • Single Message Transformation (SMT) predicate support in the Debezium engine (see the sketch after this list)

    • Capturing TRUNCATE as change event in MySQL table topics

    • Oracle LogMiner performance improvements

    • New Redis-based storage module
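
    To illustrate the SMT predicate support called out in the list above, here is a minimal, hedged sketch of running the Debezium embedded engine with a predicate-guarded transformation. The connector choice, connection settings, topic pattern, and predicate name are illustrative assumptions, not configuration taken from the release itself.

    import io.debezium.engine.ChangeEvent;
    import io.debezium.engine.DebeziumEngine;
    import io.debezium.engine.format.Json;

    import java.util.Properties;

    public class PredicateEngineSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Minimal engine and connector settings; all values are placeholders.
            props.setProperty("name", "engine-sketch");
            props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
            props.setProperty("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore");
            props.setProperty("offset.storage.file.filename", "/tmp/offsets.dat");
            props.setProperty("topic.prefix", "dbserver1");
            props.setProperty("database.hostname", "localhost");
            props.setProperty("database.port", "5432");
            props.setProperty("database.user", "postgres");
            props.setProperty("database.password", "postgres");
            props.setProperty("database.dbname", "inventory");

            // The unwrap transformation only runs when its predicate matches the topic name.
            props.setProperty("transforms", "unwrap");
            props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
            props.setProperty("transforms.unwrap.predicate", "onlyInventory");
            props.setProperty("predicates", "onlyInventory");
            props.setProperty("predicates.onlyInventory.type",
                    "org.apache.kafka.connect.transforms.predicates.TopicNameMatches");
            props.setProperty("predicates.onlyInventory.pattern", "dbserver1\\.inventory\\..*");

            try (DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                    .using(props)
                    .notifying(record -> System.out.println(record.value()))
                    .build()) {
                engine.run(); // blocks; in a real application, submit to an executor instead
            }
        }
    }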

    I’m excited to announce the release of Debezium 1.9.7.Final!

    This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 22 resolved issues overall.

    Today it’s my great pleasure to announce the availability of Debezium 2.0.0.Final!

    Since our 1.0 release in December 2019, the community has worked vigorously to build a comprehensive open-source low-latency platform for change data capture (CDC). Over the past three years, we have extended Debezium’s portfolio to include a stable connector for Oracle, a community-led connector for Vitess, the introduction of incremental snapshots, multi-partition support, and so much more. With the help of our active community of contributors and committers, Debezium is the de facto leader in the CDC space, deployed to production in many organizations across multiple industries and using hundreds of connectors to stream data changes out of thousands of databases.

    The 2.0 release marks a new milestone for Debezium, one that we are proud to share with each of you.

    I am excited to announce the release of Debezium 2.0.0.CR1!

    This release contains breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, a total of 53 issues were fixed in this release.

    I’m excited to announce the release of Debezium 1.9.6.Final!

    This release focuses on bug fixes and stability, and is the recommended update for all users from earlier versions. This release contains 78 resolved issues overall.

    I am excited to announce the release of Debezium 2.0.0.Beta2!

    This release contains several breaking changes, stability fixes, and bug fixes, all to inch us closer to 2.0.0.Final. Overall, a total of 107 issues were fixed in this release.

    I am thrilled to share that Debezium 2.0.0.Beta1 has been released!

    This release contains several new features including a pluggable topic selector, the inclusion of the database user who committed changes in Oracle change events, and improved handling of table unique indices as primary keys. In addition, there are several breaking changes such as the move to multi-partition mode as default and the introduction of the debezium-storage module and its implementations. So let’s take a look at all these in closer detail.

    With the summer in full swing, the team is pleased to announce the release of Debezium 1.9.5.Final!

    This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 24 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha3 has been released!

    While this release contains a plethora of bugfixes, there are a few noteworthy improvements, which include providing a timestamp in transaction metadata events, the addition of several new fields in Oracle’s change event source block, and a non-backward compatible change to the Oracle connector’s offsets.

    Let’s take a look at these in closer detail.

    I’m pleased to announce the release of Debezium 1.9.4.Final!

    This release primarily focuses on bugfixes and stability, and is the recommended update for all users from earlier versions. This release contains 32 resolved issues overall.

    I am thrilled to share that Debezium 2.0.0.Alpha2 has been released!

    This release is packed with tons of bugfixes and improvements, with 110 issues resolved in total. Just wow!

    A few noteworthy changes include incremental snapshots gaining support for regular expressions and a new stop signal. We also did some housekeeping and removed a number of deprecated configuration options as well as the legacy MongoDB oplog implementation.

    Let’s take a look at these in closer detail.

    As the summer nears, I’m excited to announce the release of Debezium 1.9.3.Final!

    This release primarily focuses on bugfixes and stability; however, there are some notable feature enhancements. Let’s take a moment to cool off and "dive" into these new features in a bit of detail :).

    I am excited to share that Debezium 2.0.0.Alpha1 has been released!

    This release is the first of several planned pre-releases of Debezium 2.0 over the next five months. Each pre-release plans to focus on strategic changes in the hope that as we move forward, changes can be easily tested and regressions addressed quickly.

    In this release, some of the most notable changes include requiring Java 11 to use Debezium or any of its components, the removal of wal2json support for PostgreSQL and the legacy MySQL connector implementation, as well as some notable features such as improved Debezium Server Google Pub/Sub sink support, and a multitude of bugfixes. Let’s take a look at a few of these.

    I’m excited to announce the release of Debezium 1.9.1.Final!

    This release primarily focuses on bugfixes and stability concerns after the 1.9.0.Final release.

    I am very happy to share the news that Debezium 1.9.0.Final has been released!

    Besides the usual set of bug fixes and improvements, key features of this release are support for Apache Cassandra 4, multi-database support for the Debezium connector for SQL Server, the ability to use Debezium Server as a Knative event source, as well as many improvements to the integration of Debezium Server with Redis Streams.

    Exactly 276 issues have been fixed by the community for the 1.9 release; a big thank you to each and every one of you who helped to make this happen!

    I am happy to announce the release of Debezium 1.9.0.CR1!

    Besides a range of bugfixes, this release brings the long-awaited support for Apache Cassandra 4! Overall, 52 issues have been fixed for this release.

    Let’s take a closer look at both the Cassandra 3 changes & Cassandra 4 support.

    I am happy to announce the release of Debezium 1.9.0.Beta1!

    This release includes many new features for Debezium Server, including Knative Eventing support and offset storage management with the Redis sink, multi-partitioned scaling for the SQL Server connector, and various bugfixes and improvements. Overall, 56 issues have been fixed for this release.

    Let’s take a closer look at a couple of them.

    It’s my pleasure to announce the second release of the Debezium 1.9 series, 1.9.0.Alpha2!

    This release includes support for Oracle 21c, improvements around Redis for Debezium Server, support for configuring the kafka.query.timeout.ms option, and a number of bug fixes around DDL parsers, build infrastructure, etc.

    Overall, the community fixed 51 issues for this release. Let’s take a closer look at some of the highlights.
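
    As a hedged illustration of the kafka.query.timeout.ms option mentioned above, the fragment below sets it next to a connector’s schema history settings; the broker address, topic name, and timeout value are assumptions for illustration, and the exact scope of the option is described in the connector documentation.

    import java.util.Properties;

    public class KafkaQueryTimeoutSketch {
        // Builds a fragment of a connector configuration that bounds Debezium's
        // own Kafka queries (for example, checks against the schema history topic).
        static Properties kafkaTimeoutConfig() {
            Properties props = new Properties();
            // Placeholder history settings using the 1.9-era property names.
            props.setProperty("database.history.kafka.bootstrap.servers", "kafka:9092");
            props.setProperty("database.history.kafka.topic", "schema-changes.inventory");
            // The option added in this release: cap Kafka queries at three seconds.
            props.setProperty("kafka.query.timeout.ms", "3000");
            return props;
        }
    }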

    It’s my pleasure to announce the first release of the Debezium 1.9 series, 1.9.0.Alpha1!

    With the new year comes a new release! The Debezium 1.9.0.Alpha1 release comes with quite a number of fixes and improvements, most notably improved metrics and Oracle ROWID data type support.

    It’s my great pleasure to announce the release of Debezium 1.8.0.Final!

    Besides a strong focus on the Debezium connector for MongoDB (more on that below), the 1.8 release brings support for Postgres' logical decoding messages, support for configuring SMTs and topic creation settings in the Debezium UI, and much more.

    Overall, the community has fixed 242 issues for this release. A big thank you to everyone who helped to make this release happen on time, sticking to our quarterly release cadence!

    I’m very excited to announce the release of Debezium 1.8.0.CR1!

    As we near the final release due out next week, this release focused heavily on bugfixes. Yet it also includes incremental snapshot support for MongoDB! Overall, no fewer than 34 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    I’m very happy to announce the release of Debezium 1.8.0.Beta1!

    This release is packed with exciting new features like support for MongoDB 5.0, an outbox event router for the MongoDB connector and support for Postgres logical decoding messages, as well as tons of bugfixes and other improvements. Overall, no fewer than 63 issues have been fixed for this release.

    Let’s take a closer look at some of them.

    It’s my pleasure to announce the second release of the Debezium 1.8 series, 1.8.0.Alpha2!

    With the holiday season just around the corner, the team’s release schedule remains steadfast. While Debezium 1.8.0.Alpha2 delivers quite a lot of bugfixes and minor changes, there are a few notable changes:

    • MySQL support for heartbeat action queries (see the sketch below)

    • Configurable transaction topic name

    In addition, the latest 1.2 tag of the debezium/tooling image is available. The newest version includes all the latest tools, including kcctl, a super simple, cuddly CLI for Apache Kafka Connect.
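
    For the heartbeat action queries listed above, here is a hedged sketch of how such a query is commonly wired into a MySQL connector configuration; the heartbeat table, its columns, and the interval are illustrative assumptions rather than settings taken from the release.

    import java.util.Properties;

    public class HeartbeatActionQuerySketch {
        // Emits periodic heartbeats and, on each one, runs a statement against a
        // (hypothetical) debezium_heartbeat table so the binlog keeps advancing
        // even when the captured tables are quiet.
        static Properties heartbeatConfig() {
            Properties props = new Properties();
            props.setProperty("heartbeat.interval.ms", "10000"); // one heartbeat every 10 seconds
            props.setProperty("heartbeat.action.query",
                    "INSERT INTO debezium_heartbeat (id, ts) VALUES (1, NOW()) "
                            + "ON DUPLICATE KEY UPDATE ts = NOW()");
            return props;
        }
    }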

    It’s my pleasure to announce the first release of the Debezium 1.8 series, 1.8.0.Alpha1!

    With the colors of Autumn upon us, the team has been hard at work painting lines of code for this release. With Debezium 1.8.0.Alpha1 comes quite a number of improvements, but most notable is the new native MongoDB 4.0 change streams support!

    It’s with great pleasure that I am announcing the release of Debezium 1.7.0.Final!

    Key features of this release include substantial improvements to the notion of incremental snapshotting (as introduced in Debezium 1.6), a web-based Debezium user interface, NATS support in Debezium Server, and support for running Apache Kafka without ZooKeeper via the Debezium Kafka container image.

    Also, in the wider Debezium community, some exciting things happened over the last few months; for instance, we saw a CDC connector for ScyllaDB based on the Debezium connector framework, and there’s work happening towards a Debezium Server connector for Apache Iceberg (details about this coming soon in a guest post on this blog).

    It’s my pleasure to announce the second release of the Debezium 1.7 series, 1.7.0.Beta1!

    This release brings NATS Streaming support for Debezium Server along with many other fixes and enhancements. Also this release is the first one tested with Apache Kafka 2.8.

    It’s my pleasure to announce the first release of the Debezium 1.7 series, 1.7.0.Alpha1!

    With the summer in full swing, this release brings additional improvements to the Debezium Oracle connector, as well as to the other connectors.

    I’m pleased to announce the release of Debezium 1.6.0.Final!

    This release is packed full of new features, including support for incremental snapshotting that can be toggled using the new Signal API. Based on the excellent paper DBLog: A Watermark Based Change-Data-Capture Framework by Netflix engineers Andreas Andreakis and Ioannis Papapanagiotou, the notion of incremental snapshotting addresses several requirements around snapshotting that came up repeatedly in the Debezium community:

    Let me announce the bugfix release of Debezium 1.5, 1.5.2.Final!

    This release is a rebuild of 1.5.1.Final using Java 8.

    Let me announce the bugfix release of Debezium 1.5, 1.5.1.Final!

    This release fixes a small set of issues discovered since the original release and brings a few improvements to the documentation.

    I’m pleased to announce the release of Debezium 1.6.0.Beta1!

    This release introduces incremental snapshot support for SQL Server and Db2, performance improvements for SQL Server, support for BLOB/CLOB for Oracle, and much more. Let’s take a few moments and explore some of these new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.6 series, 1.6.0.Alpha1!

    This release brings the brand new feature called incremental snapshots for MySQL and PostgreSQL connectors, a Kafka sink for Debezium Server, as well as a wide range of bug fixes and other small feature additions.

    I’m thrilled to announce the release of Debezium 1.5.0.Final!

    With Debezium 1.5, the LogMiner-based CDC implementation for Oracle moves from Incubating to Stable state, and there’s a brand-new implementation of the MySQL connector, which brings features like transaction metadata support. Other key features include support for a new "signalling table", which for instance can be used to implement schema changes with the Oracle connector, and support for TRUNCATE events with Postgres. There are also many improvements to the community-led connectors for Vitess and Apache Cassandra, as well as a wide range of bug fixes and other smaller improvements.
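
    As a hedged illustration of how the signaling table mentioned above is typically used, for instance to trigger the incremental snapshots described in the 1.6 announcement earlier on this page, the sketch below inserts an execute-snapshot signal over JDBC. The connection details, signal table name, and target table are assumptions for illustration; the signal table must match the connector’s signal.data.collection setting.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class ExecuteSnapshotSignalSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical connection details; the signal table below would be the one
            // configured via the connector's signal.data.collection property.
            try (Connection conn = DriverManager.getConnection(
                         "jdbc:mysql://localhost:3306/inventory", "debezium", "dbz");
                 PreparedStatement stmt = conn.prepareStatement(
                         "INSERT INTO debezium_signal (id, type, data) VALUES (?, ?, ?)")) {
                stmt.setString(1, "ad-hoc-1");         // arbitrary unique signal id
                stmt.setString(2, "execute-snapshot"); // signal type understood by the connector
                stmt.setString(3, "{\"data-collections\": [\"inventory.orders\"]}");
                // The connector polls this table and incrementally snapshots inventory.orders.
                stmt.executeUpdate();
            }
        }
    }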

    It’s my pleasure to announce the release of Debezium 1.5.0.CR1!

    As we begin moving toward finalizing the Debezium 1.5 release stream, the Oracle connector has been promoted to stable and there were some TLS improvements for the Cassandra connector, as well as numerous bugfixes. Overall, 50 issues have been addressed for this release.

    We are very happy to announce the release of Debezium 1.5.0.Beta2!

    The main features of this release are the new Debezium signaling table support, Vitess SET type support, and a continued focus on minor improvements, bugfixes, and polish as we sprint to the finish line for the 1.5 release.

    Overall, the community fixed 54 issues since the Beta1 release, some of which we’ll explore more in-depth below.

    I’m very happy to announce the release of Debezium 1.5.0.Beta1!

    This release adds a brand-new component, the web-based Debezium UI, as well as transaction metadata support for the MySQL connector, a large number of improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, support for Vitess 9.0, and much more. Let’s explore some of the new features in the following.

    It’s my pleasure to announce the first release of the Debezium 1.5 series, 1.5.0.Alpha1!

    This release brings many improvements to the LogMiner-based capture implementation for the Debezium Oracle connector, a large overhaul of the MySQL connector, as well as a wide range of bug fixes and other small feature additions.

    I’m pleased to announce the release of Debezium 1.4.1.Final!

    We highly recommend upgrading from 1.4.0.Final and earlier versions, as this release includes bug fixes and enhancements to several Debezium connectors, including the following:

    I am pleased to announce the release of Debezium 1.4.0.Final!

    This release concludes the major work put into Debezium over the last three months. Overall, the community fixed 117 issues during that time, including the following key features and changes:

    • New Vitess connector, featured in an in-depth blog post by Kewei Shang

    • Fine-grained selection of snapshotted tables

    • PostgreSQL Snapshotter completion hook

    • Distributed Tracing

    • MySQL support for create or read records emitted during snapshot

    • Many Oracle Logminer adapter improvements

    • Full support for Oracle JDBC connection strings

    • Improved reporting of DDL errors

    I’m pleased to announce the release of Debezium 1.4.0.CR1!

    This release focuses primarily on polishing the 1.4 release.

    I’m pleased to announce the release of Debezium 1.4.0.Beta1!

    This release includes support for distributed tracing, lowercase table and schema naming for Db2, specifying MySQL snapshot records as create or read operations, and enhancements to Vitess for nullable and primary key columns.

    I’m excited to announce the release of Debezium 1.4.0.Alpha2!

    This second pass of the 1.4 release line provides a few useful new features:

    • New API hook for the PostgreSQL Snapshotter interface

    • Field renaming using ExtractNewRecordState SMT’s add.fields and add.headers configurations (see the sketch below)
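
    Below is a hedged sketch of the add.fields and add.headers renaming mentioned in the last item above. Only the SMT-related part of a connector configuration is shown, and the chosen fields and their new names are illustrative assumptions.

    import java.util.Properties;

    public class ExtractNewRecordStateSketch {
        // Flattens Debezium change events and copies selected metadata into the
        // flattened record, renaming fields with the "source:target" syntax.
        static Properties unwrapConfig() {
            Properties props = new Properties();
            props.setProperty("transforms", "unwrap");
            props.setProperty("transforms.unwrap.type", "io.debezium.transforms.ExtractNewRecordState");
            // Add the operation type and source timestamp as fields, renamed for consumers.
            props.setProperty("transforms.unwrap.add.fields", "op:operation,source.ts_ms:event_timestamp");
            // Add the originating table name as a header, also renamed.
            props.setProperty("transforms.unwrap.add.headers", "source.table:changed_table");
            return props;
        }
    }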

    I’m excited to announce the release of Debezium 1.3.1.Final!

    This release primarily focuses on bugs that were reported after the 1.3 release. Most importantly, the following bugs related to the Debezium connector for Oracle’s LogMiner adapter were fixed, thanks to continued feedback from the Debezium community.

    • SQLExceptions thrown when using Oracle LogMiner (DBZ-2624)

    • LogMiner mining session stopped due to WorkerTask killed (DBZ-2629)

    This post originally appeared on the Bolt Labs Engineering blog.

    Traditionally, MySQL has been used to power most of the backend services at Bolt. We’ve designed our schemas in a way that they’re sharded into different MySQL clusters. Each MySQL cluster contains a subset of data and consists of one primary and multiple replication nodes.

    Once data is persisted to the database, we use the Debezium MySQL Connector to capture data change events and send them to Kafka. This gives us an easy and reliable way to communicate changes between back-end microservices.
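
    To make the pipeline described above a bit more concrete, here is a hedged sketch of a minimal Debezium MySQL connector configuration for one such cluster, expressed as Kafka Connect properties built in Java. Hostnames, credentials, server names, and topic settings are purely illustrative assumptions and are not Bolt’s actual configuration.

    import java.util.Properties;

    public class MySqlCdcConnectorSketch {
        // Minimal Debezium MySQL connector settings (1.x-era property names) for
        // streaming change events into Kafka topics named <server-name>.<db>.<table>.
        static Properties mysqlConnectorConfig() {
            Properties props = new Properties();
            props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
            props.setProperty("database.hostname", "mysql-shard-1");   // placeholder primary host
            props.setProperty("database.port", "3306");
            props.setProperty("database.user", "debezium");
            props.setProperty("database.password", "secret");          // placeholder credentials
            props.setProperty("database.server.id", "184054");         // unique replication client id
            props.setProperty("database.server.name", "shard1");       // logical name and topic prefix
            props.setProperty("database.include.list", "orders");      // capture only this database
            props.setProperty("database.history.kafka.bootstrap.servers", "kafka:9092");
            props.setProperty("database.history.kafka.topic", "schema-changes.shard1");
            return props;
        }
    }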

    I am excited to announce the release of Debezium 1.4.0.Alpha1!

    This first pass of the 1.4 release line provides a few useful new features:

    • New Vitess connector

    • Allow fine-grained selection of snapshotted tables

    Overall, the community fixed 41 issues for this release. Let’s take a closer look at some of the highlights.

    It’s with great pleasure that I’m announcing the release of Debezium 1.3.0.Final!

    As per Debezium’s quarterly release cadence, this wraps up the work of the last three months. Overall, the community has fixed 138 issues during that time, including the following key features and changes:

    • A new incubating LogMiner-based implementation for ingesting change events from Oracle

    • Support for Azure Event Hubs in Debezium Server

    • Upgrade to Apache Kafka 2.6

    • Revised filter option names

    • A new SQL Server connector snapshot mode, initial_only

    • Support for database-filtered columns for SQL Server

    • Additional connection options for the MongoDB connector

    • Improvements to ByteBufferConverter for implementing the outbox pattern with Avro as the payload format

    \ No newline at end of file diff --git a/tag/website/index.html b/tag/website/index.html index dfa45bceba..b0347d149d 100644 --- a/tag/website/index.html +++ b/tag/website/index.html @@ -1 +1 @@ - Tag: website

    Debezium Blog

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10, but we have also unveiled some recent changes to debezium.io.

    As you may have noticed, we have a new website with documentation, a blog, and information about the Debezium community and how you can contribute. Let us know what you think, and contribute improvements.

    \ No newline at end of file + Tag: website

    Debezium Blog

    This past summer has been a super exciting time for the team. Not only have we been working hard on Debezium 0.10, but we have also unveiled some recent changes to debezium.io.

    As you may have noticed, we have a new website with documentation, a blog, and information about the Debezium community and how you can contribute. Let us know what you think, and contribute improvements.

    \ No newline at end of file